column              type    min length  max length
comment             string  16          8.84k
method_body         string  37          239k
target_code         string  0           242
method_body_after   string  29          239k
context_before      string  14          424k
context_after       string  14          284k
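Each row pairs a reviewer comment with the method it targets before and after the suggested change, plus the enclosing class as context. A minimal sketch of the row shape, using the column names from the table above (the holder type itself is hypothetical, introduced only for illustration):

```java
// Hypothetical holder mirroring the six string columns above; the dataset
// itself stores plain strings, so this record only documents the shape.
public record ReviewExample(
        String comment,          // reviewer remark on the code
        String methodBody,       // method source before the change
        String targetCode,       // exact span the comment targets (may be empty)
        String methodBodyAfter,  // method source after applying the suggestion
        String contextBefore,    // enclosing class before the change
        String contextAfter) {   // enclosing class after the change
}
```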
Can't we simply return `values.clone()`?
public byte[] getBytes() { BByteArray copy = (BByteArray) this.copy(); return copy.values; }
return copy.values;
public byte[] getBytes() { return values.clone(); }
class BByteArray extends BNewArray { private static BType arrayType = new BArrayType(BTypes.typeByte); private byte[] values; public BByteArray(byte[] values) { this.values = values; this.size = values.length; } public BByteArray() { values = (byte[]) newArrayInstance(Byte.TYPE); } public void add(long index, byte value) { prepareForAdd(index, values.length); values[(int) index] = value; } public byte get(long index) { rangeCheckForGet(index, size); return values[(int) index]; } @Override public BType getType() { return arrayType; } @Override public void grow(int newLength) { values = Arrays.copyOf(values, newLength); } @Override public BValue copy() { BByteArray byteArray = new BByteArray(Arrays.copyOf(values, values.length)); byteArray.size = this.size; return byteArray; } @Override public String stringValue() { StringJoiner sj = new StringJoiner(", ", "[", "]"); for (int i = 0; i < size; i++) { sj.add("\'" + Byte.toUnsignedInt(values[i]) + "\'"); } return sj.toString(); } @Override public BValue getBValue(long index) { return new BByte(get(index)); } }
class BByteArray extends BNewArray { private static BType arrayType = new BArrayType(BTypes.typeByte); private byte[] values; public BByteArray(byte[] values) { this.values = values; this.size = values.length; } public BByteArray() { values = (byte[]) newArrayInstance(Byte.TYPE); } public void add(long index, byte value) { prepareForAdd(index, values.length); values[(int) index] = value; } public byte get(long index) { rangeCheckForGet(index, size); return values[(int) index]; } @Override public BType getType() { return arrayType; } @Override public void grow(int newLength) { values = Arrays.copyOf(values, newLength); } @Override public BValue copy() { BByteArray byteArray = new BByteArray(Arrays.copyOf(values, values.length)); byteArray.size = this.size; return byteArray; } @Override public String stringValue() { StringJoiner sj = new StringJoiner(", ", "[", "]"); for (int i = 0; i < size; i++) { sj.add("\'" + Byte.toUnsignedInt(values[i]) + "\'"); } return sj.toString(); } @Override public BValue getBValue(long index) { return new BByte(get(index)); } }
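In the first example, `copy()` allocates an entire new BByteArray via `Arrays.copyOf` only so the caller can read its backing array back out, while `values.clone()` produces the same defensive copy of the bytes in one step. A minimal standalone sketch of the two variants, assuming a plain class in place of the actual Ballerina runtime types:

```java
import java.util.Arrays;

// Standalone sketch contrasting the two getBytes() variants from the row
// above; the field and method names mimic BByteArray, but this is not it.
public class CloneVsCopy {
    private byte[] values = {1, 2, 3};

    // Before: materialize a full copy of the container, then expose its array.
    public byte[] getBytesViaCopy() {
        byte[] copiedContainer = Arrays.copyOf(values, values.length); // what copy() did
        return copiedContainer;
    }

    // After: clone the backing array directly; the caller gets the same
    // defensive copy without the intermediate container object.
    public byte[] getBytes() {
        return values.clone();
    }

    public static void main(String[] args) {
        CloneVsCopy arr = new CloneVsCopy();
        byte[] out = arr.getBytes();
        out[0] = 42; // mutating the returned array...
        System.out.println(arr.values[0]); // ...leaves the original intact: prints 1
    }
}
```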
I think we can remove DEFAULT_CLUSTER as well. The feature is useless and is kept only for compatibility.
public TGetDBPrivsResult getDBPrivs(TGetDBPrivsParams params) throws TException { LOG.debug("get database privileges request: {}", params); TGetDBPrivsResult result = new TGetDBPrivsResult(); List<TDBPrivDesc> tDBPrivs = Lists.newArrayList(); result.setDb_privs(tDBPrivs); UserIdentity currentUser = UserIdentity.fromThrift(params.current_user_ident); List<DbPrivEntry> dbPrivEntries = GlobalStateMgr.getCurrentState().getAuth().getDBPrivEntries(currentUser); for (DbPrivEntry entry : dbPrivEntries) { PrivBitSet savedPrivs = entry.getPrivSet(); String clusterPrefix = SystemInfoService.DEFAULT_CLUSTER + ClusterNamespace.CLUSTER_DELIMITER; String userIdentStr = currentUser.toString().replace(clusterPrefix, ""); String dbName = entry.getOrigDb(); boolean isGrantable = savedPrivs.satisfy(PrivPredicate.GRANT); List<TDBPrivDesc> tPrivs = savedPrivs.toPrivilegeList().stream().map( priv -> { TDBPrivDesc privDesc = new TDBPrivDesc(); privDesc.setDb_name(dbName); privDesc.setIs_grantable(isGrantable); privDesc.setUser_ident_str(userIdentStr); privDesc.setPriv(priv.getUpperNameForMysql()); return privDesc; } ).collect(Collectors.toList()); if (savedPrivs.satisfy(PrivPredicate.LOAD)) { tPrivs.addAll(Lists.newArrayList("INSERT", "UPDATE", "DELETE").stream().map(priv -> { TDBPrivDesc privDesc = new TDBPrivDesc(); privDesc.setDb_name(dbName); privDesc.setIs_grantable(isGrantable); privDesc.setUser_ident_str(userIdentStr); privDesc.setPriv(priv); return privDesc; }).collect(Collectors.toList())); } tDBPrivs.addAll(tPrivs); } return result; }
String clusterPrefix = SystemInfoService.DEFAULT_CLUSTER + ClusterNamespace.CLUSTER_DELIMITER;
public TGetDBPrivsResult getDBPrivs(TGetDBPrivsParams params) throws TException { LOG.debug("get database privileges request: {}", params); TGetDBPrivsResult result = new TGetDBPrivsResult(); List<TDBPrivDesc> tDBPrivs = Lists.newArrayList(); result.setDb_privs(tDBPrivs); UserIdentity currentUser = UserIdentity.fromThrift(params.current_user_ident); List<DbPrivEntry> dbPrivEntries = GlobalStateMgr.getCurrentState().getAuth().getDBPrivEntries(currentUser); for (DbPrivEntry entry : dbPrivEntries) { PrivBitSet savedPrivs = entry.getPrivSet(); String clusterPrefix = SystemInfoService.DEFAULT_CLUSTER + ClusterNamespace.CLUSTER_DELIMITER; String userIdentStr = currentUser.toString().replace(clusterPrefix, ""); String dbName = entry.getOrigDb(); boolean isGrantable = savedPrivs.satisfy(PrivPredicate.GRANT); List<TDBPrivDesc> tPrivs = savedPrivs.toPrivilegeList().stream().map( priv -> { TDBPrivDesc privDesc = new TDBPrivDesc(); privDesc.setDb_name(dbName); privDesc.setIs_grantable(isGrantable); privDesc.setUser_ident_str(userIdentStr); privDesc.setPriv(priv.getUpperNameForMysql()); return privDesc; } ).collect(Collectors.toList()); if (savedPrivs.satisfy(PrivPredicate.LOAD)) { tPrivs.addAll(Lists.newArrayList("INSERT", "UPDATE", "DELETE").stream().map(priv -> { TDBPrivDesc privDesc = new TDBPrivDesc(); privDesc.setDb_name(dbName); privDesc.setIs_grantable(isGrantable); privDesc.setUser_ident_str(userIdentStr); privDesc.setPriv(priv); return privDesc; }).collect(Collectors.toList())); } tDBPrivs.addAll(tPrivs); } return result; }
class FrontendServiceImpl implements FrontendService.Iface { private static final Logger LOG = LogManager.getLogger(LeaderImpl.class); private LeaderImpl leaderImpl; private ExecuteEnv exeEnv; public FrontendServiceImpl(ExecuteEnv exeEnv) { leaderImpl = new LeaderImpl(); this.exeEnv = exeEnv; } @Override public TGetDbsResult getDbNames(TGetDbsParams params) throws TException { LOG.debug("get db request: {}", params); TGetDbsResult result = new TGetDbsResult(); List<String> dbs = Lists.newArrayList(); PatternMatcher matcher = null; if (params.isSetPattern()) { try { matcher = PatternMatcher.createMysqlPattern(params.getPattern(), CaseSensibility.DATABASE.getCaseSensibility()); } catch (AnalysisException e) { throw new TException("Pattern is in bad format: " + params.getPattern()); } } GlobalStateMgr globalStateMgr = GlobalStateMgr.getCurrentState(); List<String> dbNames = globalStateMgr.getDbNames(); LOG.debug("get db names: {}", dbNames); UserIdentity currentUser = null; if (params.isSetCurrent_user_ident()) { currentUser = UserIdentity.fromThrift(params.current_user_ident); } else { currentUser = UserIdentity.createAnalyzedUserIdentWithIp(params.user, params.user_ip); } for (String fullName : dbNames) { if (!globalStateMgr.getAuth().checkDbPriv(currentUser, fullName, PrivPredicate.SHOW)) { continue; } final String db = ClusterNamespace.getNameFromFullName(fullName); if (matcher != null && !matcher.match(db)) { continue; } dbs.add(fullName); } result.setDbs(dbs); return result; } @Override public TGetTablesResult getTableNames(TGetTablesParams params) throws TException { LOG.debug("get table name request: {}", params); TGetTablesResult result = new TGetTablesResult(); List<String> tablesResult = Lists.newArrayList(); result.setTables(tablesResult); PatternMatcher matcher = null; if (params.isSetPattern()) { try { matcher = PatternMatcher.createMysqlPattern(params.getPattern(), CaseSensibility.TABLE.getCaseSensibility()); } catch (AnalysisException e) { throw new TException("Pattern is in bad format: " + params.getPattern()); } } Database db = GlobalStateMgr.getCurrentState().getDb(params.db); UserIdentity currentUser = null; if (params.isSetCurrent_user_ident()) { currentUser = UserIdentity.fromThrift(params.current_user_ident); } else { currentUser = UserIdentity.createAnalyzedUserIdentWithIp(params.user, params.user_ip); } if (db != null) { for (String tableName : db.getTableNamesWithLock()) { LOG.debug("get table: {}, wait to check", tableName); if (!GlobalStateMgr.getCurrentState().getAuth().checkTblPriv(currentUser, params.db, tableName, PrivPredicate.SHOW)) { continue; } if (matcher != null && !matcher.match(tableName)) { continue; } tablesResult.add(tableName); } } return result; } @Override public TListTableStatusResult listTableStatus(TGetTablesParams params) throws TException { LOG.debug("get list table request: {}", params); TListTableStatusResult result = new TListTableStatusResult(); List<TTableStatus> tablesResult = Lists.newArrayList(); result.setTables(tablesResult); PatternMatcher matcher = null; if (params.isSetPattern()) { try { matcher = PatternMatcher.createMysqlPattern(params.getPattern(), CaseSensibility.TABLE.getCaseSensibility()); } catch (AnalysisException e) { throw new TException("Pattern is in bad format " + params.getPattern()); } } Database db = GlobalStateMgr.getCurrentState().getDb(params.db); long limit = params.isSetLimit() ? 
params.getLimit() : -1; UserIdentity currentUser = null; if (params.isSetCurrent_user_ident()) { currentUser = UserIdentity.fromThrift(params.current_user_ident); } else { currentUser = UserIdentity.createAnalyzedUserIdentWithIp(params.user, params.user_ip); } if (params.isSetType() && TTableType.MATERIALIZED_VIEW.equals(params.getType())) { listMaterializedViewStatus(tablesResult, limit, matcher, currentUser, params.db); return result; } if (db != null) { db.readLock(); try { boolean listingViews = params.isSetType() && TTableType.VIEW.equals(params.getType()); List<Table> tables = listingViews ? db.getViews() : db.getTables(); for (Table table : tables) { if (!GlobalStateMgr.getCurrentState().getAuth().checkTblPriv(currentUser, params.db, table.getName(), PrivPredicate.SHOW)) { continue; } if (matcher != null && !matcher.match(table.getName())) { continue; } TTableStatus status = new TTableStatus(); status.setName(table.getName()); status.setType(table.getMysqlType()); status.setEngine(table.getEngine()); status.setComment(table.getComment()); status.setCreate_time(table.getCreateTime()); status.setLast_check_time(table.getLastCheckTime()); if (listingViews) { View view = (View) table; String ddlSql = view.getInlineViewDef(); List<TableRef> tblRefs = new ArrayList<>(); view.getQueryStmt().collectTableRefs(tblRefs); for (TableRef tblRef : tblRefs) { if (!GlobalStateMgr.getCurrentState().getAuth() .checkTblPriv(currentUser, tblRef.getName().getDb(), tblRef.getName().getTbl(), PrivPredicate.SHOW)) { ddlSql = ""; break; } } status.setDdl_sql(ddlSql); } tablesResult.add(status); if (limit > 0 && tablesResult.size() >= limit) { break; } } } finally { db.readUnlock(); } } return result; } public void listMaterializedViewStatus(List<TTableStatus> tablesResult, long limit, PatternMatcher matcher, UserIdentity currentUser, String dbName) { Database db = GlobalStateMgr.getCurrentState().getDb(dbName); if (db == null) { LOG.warn("database not exists: {}", dbName); return; } db.readLock(); try { for (Table materializedView : db.getMaterializedViews()) { if (!GlobalStateMgr.getCurrentState().getAuth().checkTblPriv(currentUser, dbName, materializedView.getName(), PrivPredicate.SHOW)) { continue; } if (matcher != null && !matcher.match(materializedView.getName())) { continue; } MaterializedView mvTable = (MaterializedView) materializedView; List<String> createTableStmt = Lists.newArrayList(); GlobalStateMgr.getDdlStmt(mvTable, createTableStmt, null, null, false, true); String ddlSql = createTableStmt.get(0); TTableStatus status = new TTableStatus(); status.setId(String.valueOf(mvTable.getId())); status.setName(mvTable.getName()); status.setDdl_sql(ddlSql); status.setRows(String.valueOf(mvTable.getRowCount())); status.setType(mvTable.getMysqlType()); status.setComment(mvTable.getComment()); tablesResult.add(status); if (limit > 0 && tablesResult.size() >= limit) { return; } } for (Table table : db.getTables()) { if (table.getType() == Table.TableType.OLAP) { OlapTable olapTable = (OlapTable) table; List<MaterializedIndex> visibleMaterializedViews = olapTable.getVisibleIndex(); long baseIdx = olapTable.getBaseIndexId(); for (MaterializedIndex mvIdx : visibleMaterializedViews) { if (baseIdx == mvIdx.getId()) { continue; } if (matcher != null && !matcher.match(olapTable.getIndexNameById(mvIdx.getId()))) { continue; } MaterializedIndexMeta mvMeta = olapTable.getVisibleIndexIdToMeta().get(mvIdx.getId()); TTableStatus status = new TTableStatus(); status.setId(String.valueOf(mvIdx.getId())); 
status.setName(olapTable.getIndexNameById(mvIdx.getId())); if (mvMeta.getOriginStmt() == null) { StringBuilder originStmtBuilder = new StringBuilder( "create materialized view " + olapTable.getIndexNameById(mvIdx.getId()) + " as select "); String groupByString = ""; for (Column column : mvMeta.getSchema()) { if (column.isKey()) { groupByString += column.getName() + ","; } } originStmtBuilder.append(groupByString); for (Column column : mvMeta.getSchema()) { if (!column.isKey()) { originStmtBuilder.append(column.getAggregationType().toString()).append("(") .append(column.getName()).append(")").append(","); } } originStmtBuilder.delete(originStmtBuilder.length() - 1, originStmtBuilder.length()); originStmtBuilder.append(" from ").append(olapTable.getName()).append(" group by ") .append(groupByString); originStmtBuilder.delete(originStmtBuilder.length() - 1, originStmtBuilder.length()); status.setDdl_sql(originStmtBuilder.toString()); } else { status.setDdl_sql(mvMeta.getOriginStmt().replace("\n", "").replace("\t", "") .replaceAll("[ ]+", " ")); } status.setRows(String.valueOf(mvIdx.getRowCount())); status.setType(""); status.setComment(""); tablesResult.add(status); if (limit > 0 && tablesResult.size() >= limit) { return; } } } } } finally { db.readUnlock(); } } @Override public TGetTaskInfoResult getTasks(TGetTasksParams params) throws TException { LOG.debug("get show task request: {}", params); TGetTaskInfoResult result = new TGetTaskInfoResult(); List<TTaskInfo> tasksResult = Lists.newArrayList(); result.setTasks(tasksResult); UserIdentity currentUser = null; if (params.isSetCurrent_user_ident()) { currentUser = UserIdentity.fromThrift(params.current_user_ident); } GlobalStateMgr globalStateMgr = GlobalStateMgr.getCurrentState(); TaskManager taskManager = globalStateMgr.getTaskManager(); List<Task> taskList = taskManager.showTasks(null); for (Task task : taskList) { if (!globalStateMgr.getAuth().checkDbPriv(currentUser, task.getDbName(), PrivPredicate.SHOW)) { continue; } TTaskInfo info = new TTaskInfo(); info.setTask_name(task.getName()); info.setCreate_time(task.getCreateTime() / 1000); String scheduleStr = task.getType().name(); if (task.getType() == Constants.TaskType.PERIODICAL) { scheduleStr += task.getSchedule(); } info.setSchedule(scheduleStr); info.setDatabase(ClusterNamespace.getNameFromFullName(task.getDbName())); info.setDefinition(task.getDefinition()); info.setExpire_time(task.getExpireTime() / 1000); tasksResult.add(info); } return result; } @Override public TGetTaskRunInfoResult getTaskRuns(TGetTasksParams params) throws TException { LOG.debug("get show task run request: {}", params); TGetTaskRunInfoResult result = new TGetTaskRunInfoResult(); List<TTaskRunInfo> tasksResult = Lists.newArrayList(); result.setTask_runs(tasksResult); UserIdentity currentUser = null; if (params.isSetCurrent_user_ident()) { currentUser = UserIdentity.fromThrift(params.current_user_ident); } GlobalStateMgr globalStateMgr = GlobalStateMgr.getCurrentState(); TaskManager taskManager = globalStateMgr.getTaskManager(); List<TaskRunStatus> taskRunList = taskManager.showTaskRunStatus(null); for (TaskRunStatus status : taskRunList) { if (!globalStateMgr.getAuth().checkDbPriv(currentUser, status.getDbName(), PrivPredicate.SHOW)) { continue; } TTaskRunInfo info = new TTaskRunInfo(); info.setQuery_id(status.getQueryId()); info.setTask_name(status.getTaskName()); info.setCreate_time(status.getCreateTime() / 1000); info.setFinish_time(status.getFinishTime() / 1000); 
info.setState(status.getState().toString()); info.setDatabase(ClusterNamespace.getNameFromFullName(status.getDbName())); info.setDefinition(status.getDefinition()); info.setError_code(status.getErrorCode()); info.setError_message(status.getErrorMessage()); info.setExpire_time(status.getExpireTime() / 1000); tasksResult.add(info); } return result; } @Override public TGetTablePrivsResult getTablePrivs(TGetTablePrivsParams params) throws TException { LOG.debug("get table privileges request: {}", params); TGetTablePrivsResult result = new TGetTablePrivsResult(); List<TTablePrivDesc> tTablePrivs = Lists.newArrayList(); result.setTable_privs(tTablePrivs); UserIdentity currentUser = UserIdentity.fromThrift(params.current_user_ident); List<TablePrivEntry> tablePrivEntries = GlobalStateMgr.getCurrentState().getAuth().getTablePrivEntries(currentUser); for (TablePrivEntry entry : tablePrivEntries) { PrivBitSet savedPrivs = entry.getPrivSet(); String clusterPrefix = SystemInfoService.DEFAULT_CLUSTER + ClusterNamespace.CLUSTER_DELIMITER; String userIdentStr = currentUser.toString().replace(clusterPrefix, ""); String dbName = entry.getOrigDb(); boolean isGrantable = savedPrivs.satisfy(PrivPredicate.GRANT); List<TTablePrivDesc> tPrivs = savedPrivs.toPrivilegeList().stream().map( priv -> { TTablePrivDesc privDesc = new TTablePrivDesc(); privDesc.setDb_name(dbName); privDesc.setTable_name(entry.getOrigTbl()); privDesc.setIs_grantable(isGrantable); privDesc.setUser_ident_str(userIdentStr); privDesc.setPriv(priv.getUpperNameForMysql()); return privDesc; } ).collect(Collectors.toList()); if (savedPrivs.satisfy(PrivPredicate.LOAD)) { tPrivs.addAll(Lists.newArrayList("INSERT", "UPDATE", "DELETE").stream().map(priv -> { TTablePrivDesc privDesc = new TTablePrivDesc(); privDesc.setDb_name(dbName); privDesc.setTable_name(entry.getOrigTbl()); privDesc.setIs_grantable(isGrantable); privDesc.setUser_ident_str(userIdentStr); privDesc.setPriv(priv); return privDesc; }).collect(Collectors.toList())); } tTablePrivs.addAll(tPrivs); } return result; } @Override public TGetUserPrivsResult getUserPrivs(TGetUserPrivsParams params) throws TException { LOG.debug("get user privileges request: {}", params); TGetUserPrivsResult result = new TGetUserPrivsResult(); List<TUserPrivDesc> tUserPrivs = Lists.newArrayList(); result.setUser_privs(tUserPrivs); UserIdentity currentUser = UserIdentity.fromThrift(params.current_user_ident); Auth currAuth = GlobalStateMgr.getCurrentState().getAuth(); UserPrivTable userPrivTable = currAuth.getUserPrivTable(); List<UserIdentity> userIdents = Lists.newArrayList(); userIdents.add(currentUser); for (UserIdentity userIdent : userIdents) { PrivBitSet savedPrivs = new PrivBitSet(); userPrivTable.getPrivs(userIdent, savedPrivs); String clusterPrefix = SystemInfoService.DEFAULT_CLUSTER + ClusterNamespace.CLUSTER_DELIMITER; String userIdentStr = currentUser.toString().replace(clusterPrefix, ""); List<TUserPrivDesc> tPrivs = savedPrivs.toPrivilegeList().stream().map( priv -> { boolean isGrantable = Privilege.NODE_PRIV != priv && userPrivTable.hasPriv(userIdent, PrivPredicate.GRANT); TUserPrivDesc privDesc = new TUserPrivDesc(); privDesc.setIs_grantable(isGrantable); privDesc.setUser_ident_str(userIdentStr); privDesc.setPriv(priv.getUpperNameForMysql()); return privDesc; } ).collect(Collectors.toList()); tUserPrivs.addAll(tPrivs); } return result; } @Override public TFeResult updateExportTaskStatus(TUpdateExportTaskStatusRequest request) throws TException { TStatus status = new
TStatus(TStatusCode.OK); TFeResult result = new TFeResult(FrontendServiceVersion.V1, status); return result; } @Override public TDescribeTableResult describeTable(TDescribeTableParams params) throws TException { LOG.debug("get desc table request: {}", params); TDescribeTableResult result = new TDescribeTableResult(); List<TColumnDef> columns = Lists.newArrayList(); result.setColumns(columns); UserIdentity currentUser = null; if (params.isSetCurrent_user_ident()) { currentUser = UserIdentity.fromThrift(params.current_user_ident); } else { currentUser = UserIdentity.createAnalyzedUserIdentWithIp(params.user, params.user_ip); } long limit = params.isSetLimit() ? params.getLimit() : -1; if (!params.isSetDb() && StringUtils.isBlank(params.getTable_name())) { describeWithoutDbAndTable(currentUser, columns, limit); return result; } if (!GlobalStateMgr.getCurrentState().getAuth().checkTblPriv(currentUser, params.db, params.getTable_name(), PrivPredicate.SHOW)) { return result; } Database db = GlobalStateMgr.getCurrentState().getDb(params.db); if (db != null) { db.readLock(); try { Table table = db.getTable(params.getTable_name()); setColumnDesc(columns, table, limit, false, params.db, params.getTable_name()); } finally { db.readUnlock(); } } return result; } private void describeWithoutDbAndTable(UserIdentity currentUser, List<TColumnDef> columns, long limit) { GlobalStateMgr globalStateMgr = GlobalStateMgr.getCurrentState(); List<String> dbNames = globalStateMgr.getDbNames(); boolean reachLimit; for (String fullName : dbNames) { if (!GlobalStateMgr.getCurrentState().getAuth().checkDbPriv(currentUser, fullName, PrivPredicate.SHOW)) { continue; } Database db = GlobalStateMgr.getCurrentState().getDb(fullName); if (db != null) { for (String tableName : db.getTableNamesWithLock()) { LOG.debug("get table: {}, wait to check", tableName); if (!GlobalStateMgr.getCurrentState().getAuth().checkTblPriv(currentUser, fullName, tableName, PrivPredicate.SHOW)) { continue; } db.readLock(); try { Table table = db.getTable(tableName); reachLimit = setColumnDesc(columns, table, limit, true, fullName, tableName); } finally { db.readUnlock(); } if (reachLimit) { return; } } } } } private boolean setColumnDesc(List<TColumnDef> columns, Table table, long limit, boolean needSetDbAndTable, String db, String tbl) { if (table != null) { String tableKeysType = ""; if (TableType.OLAP.equals(table.getType())) { OlapTable olapTable = (OlapTable) table; tableKeysType = olapTable.getKeysType().name().substring(0, 3).toUpperCase(); } for (Column column : table.getBaseSchema()) { final TColumnDesc desc = new TColumnDesc(column.getName(), column.getPrimitiveType().toThrift()); final Integer precision = column.getType().getPrecision(); if (precision != null) { desc.setColumnPrecision(precision); } final Integer columnLength = column.getType().getColumnSize(); if (columnLength != null) { desc.setColumnLength(columnLength); } final Integer decimalDigits = column.getType().getDecimalDigits(); if (decimalDigits != null) { desc.setColumnScale(decimalDigits); } if (column.isKey()) { desc.setColumnKey(tableKeysType); } else { desc.setColumnKey(""); } final TColumnDef colDef = new TColumnDef(desc); final String comment = column.getComment(); if (comment != null) { colDef.setComment(comment); } columns.add(colDef); if (needSetDbAndTable) { columns.get(columns.size() - 1).columnDesc.setDbName(db); columns.get(columns.size() - 1).columnDesc.setTableName(tbl); } if (limit > 0 && columns.size() >= limit) { return true; } } } return false; } 
@Override public TShowVariableResult showVariables(TShowVariableRequest params) throws TException { TShowVariableResult result = new TShowVariableResult(); Map<String, String> map = Maps.newHashMap(); result.setVariables(map); ConnectContext ctx = exeEnv.getScheduler().getContext(params.getThreadId()); if (ctx == null) { return result; } List<List<String>> rows = VariableMgr.dump(SetType.fromThrift(params.getVarType()), ctx.getSessionVariable(), null); for (List<String> row : rows) { map.put(row.get(0), row.get(1)); } return result; } @Override public TReportExecStatusResult reportExecStatus(TReportExecStatusParams params) throws TException { return QeProcessorImpl.INSTANCE.reportExecStatus(params, getClientAddr()); } @Override public TMasterResult finishTask(TFinishTaskRequest request) throws TException { return leaderImpl.finishTask(request); } @Override public TMasterResult report(TReportRequest request) throws TException { return leaderImpl.report(request); } @Override public TFetchResourceResult fetchResource() throws TException { throw new TException("not supported"); } @Override public TFeResult isMethodSupported(TIsMethodSupportedRequest request) throws TException { TStatus status = new TStatus(TStatusCode.OK); TFeResult result = new TFeResult(FrontendServiceVersion.V1, status); switch (request.getFunction_name()) { case "STREAMING_MINI_LOAD": break; default: status.setStatus_code(NOT_IMPLEMENTED_ERROR); break; } return result; } @Override public TMasterOpResult forward(TMasterOpRequest params) throws TException { TNetworkAddress clientAddr = getClientAddr(); if (clientAddr != null) { Frontend fe = GlobalStateMgr.getCurrentState().getFeByHost(clientAddr.getHostname()); if (fe == null) { LOG.warn("reject request from invalid host. client: {}", clientAddr); throw new TException("request from invalid host was rejected."); } } LOG.info("receive forwarded stmt {} from FE: {}", params.getStmt_id(), clientAddr.getHostname()); ConnectContext context = new ConnectContext(null); ConnectProcessor processor = new ConnectProcessor(context); TMasterOpResult result = processor.proxyExecute(params); ConnectContext.remove(); return result; } private void checkPasswordAndPrivs(String cluster, String user, String passwd, String db, String tbl, String clientIp, PrivPredicate predicate) throws AuthenticationException { final String fullUserName = ClusterNamespace.getFullName(user); List<UserIdentity> currentUser = Lists.newArrayList(); if (!GlobalStateMgr.getCurrentState().getAuth() .checkPlainPassword(fullUserName, clientIp, passwd, currentUser)) { throw new AuthenticationException("Access denied for " + fullUserName + "@" + clientIp); } Preconditions.checkState(currentUser.size() == 1); if (!GlobalStateMgr.getCurrentState().getAuth().checkTblPriv(currentUser.get(0), db, tbl, predicate)) { throw new AuthenticationException( "Access denied; you need (at least one of) the LOAD privilege(s) for this operation"); } } @Override public TLoadTxnBeginResult loadTxnBegin(TLoadTxnBeginRequest request) throws TException { String clientAddr = getClientAddrAsString(); LOG.info("receive txn begin request, db: {}, tbl: {}, label: {}, backend: {}", request.getDb(), request.getTbl(), request.getLabel(), clientAddr); LOG.debug("txn begin request: {}", request); TLoadTxnBeginResult result = new TLoadTxnBeginResult(); if (!GlobalStateMgr.getCurrentState().isLeader()) { TStatus status = new TStatus(TStatusCode.INTERNAL_ERROR); status.setError_msgs(Lists.newArrayList("current fe is not master")); result.setStatus(status); 
return result; } TStatus status = new TStatus(TStatusCode.OK); result.setStatus(status); try { result.setTxnId(loadTxnBeginImpl(request, clientAddr)); } catch (DuplicatedRequestException e) { LOG.info("duplicate request for stream load. request id: {}, txn_id: {}", e.getDuplicatedRequestId(), e.getTxnId()); result.setTxnId(e.getTxnId()); } catch (LabelAlreadyUsedException e) { status.setStatus_code(TStatusCode.LABEL_ALREADY_EXISTS); status.addToError_msgs(e.getMessage()); result.setJob_status(e.getJobStatus()); } catch (UserException e) { LOG.warn("failed to begin: {}", e.getMessage()); status.setStatus_code(TStatusCode.ANALYSIS_ERROR); status.addToError_msgs(e.getMessage()); } catch (Throwable e) { LOG.warn("catch unknown result.", e); status.setStatus_code(TStatusCode.INTERNAL_ERROR); status.addToError_msgs(Strings.nullToEmpty(e.getMessage())); return result; } return result; } private long loadTxnBeginImpl(TLoadTxnBeginRequest request, String clientIp) throws UserException { String cluster = request.getCluster(); if (Strings.isNullOrEmpty(cluster)) { cluster = SystemInfoService.DEFAULT_CLUSTER; } checkPasswordAndPrivs(cluster, request.getUser(), request.getPasswd(), request.getDb(), request.getTbl(), request.getUser_ip(), PrivPredicate.LOAD); if (Strings.isNullOrEmpty(request.getLabel())) { throw new UserException("empty label in begin request"); } GlobalStateMgr globalStateMgr = GlobalStateMgr.getCurrentState(); String dbName = request.getDb(); Database db = globalStateMgr.getDb(dbName); if (db == null) { throw new UserException("unknown database, database=" + dbName); } Table table = db.getTable(request.getTbl()); if (table == null) { throw new UserException("unknown table \"" + request.getDb() + "." + request.getTbl() + "\""); } long timeoutSecond = request.isSetTimeout() ? request.getTimeout() : Config.stream_load_default_timeout_second; MetricRepo.COUNTER_LOAD_ADD.increase(1L); return GlobalStateMgr.getCurrentGlobalTransactionMgr().beginTransaction( db.getId(), Lists.newArrayList(table.getId()), request.getLabel(), request.getRequest_id(), new TxnCoordinator(TxnSourceType.BE, clientIp), TransactionState.LoadJobSourceType.BACKEND_STREAMING, -1, timeoutSecond); } @Override public TLoadTxnCommitResult loadTxnCommit(TLoadTxnCommitRequest request) throws TException { String clientAddr = getClientAddrAsString(); LOG.info("receive txn commit request. db: {}, tbl: {}, txn_id: {}, backend: {}", request.getDb(), request.getTbl(), request.getTxnId(), clientAddr); LOG.debug("txn commit request: {}", request); TLoadTxnCommitResult result = new TLoadTxnCommitResult(); if (!GlobalStateMgr.getCurrentState().isLeader()) { TStatus status = new TStatus(TStatusCode.INTERNAL_ERROR); status.setError_msgs(Lists.newArrayList("current fe is not master")); result.setStatus(status); return result; } TStatus status = new TStatus(TStatusCode.OK); result.setStatus(status); try { if (!loadTxnCommitImpl(request)) { status.setStatus_code(TStatusCode.PUBLISH_TIMEOUT); status.addToError_msgs("Publish timeout. 
The data will be visible after a while"); } } catch (UserException e) { LOG.warn("failed to commit txn_id: {}: {}", request.getTxnId(), e.getMessage()); status.setStatus_code(TStatusCode.ANALYSIS_ERROR); status.addToError_msgs(e.getMessage()); } catch (Throwable e) { LOG.warn("catch unknown result.", e); status.setStatus_code(TStatusCode.INTERNAL_ERROR); status.addToError_msgs(Strings.nullToEmpty(e.getMessage())); return result; } return result; } private boolean loadTxnCommitImpl(TLoadTxnCommitRequest request) throws UserException { String cluster = request.getCluster(); if (Strings.isNullOrEmpty(cluster)) { cluster = SystemInfoService.DEFAULT_CLUSTER; } if (request.isSetAuth_code()) { } else { checkPasswordAndPrivs(cluster, request.getUser(), request.getPasswd(), request.getDb(), request.getTbl(), request.getUser_ip(), PrivPredicate.LOAD); } GlobalStateMgr globalStateMgr = GlobalStateMgr.getCurrentState(); String dbName = request.getDb(); Database db = globalStateMgr.getDb(dbName); if (db == null) { throw new UserException("unknown database, database=" + dbName); } TxnCommitAttachment attachment = TxnCommitAttachment.fromThrift(request.txnCommitAttachment); long timeoutMs = request.isSetThrift_rpc_timeout_ms() ? request.getThrift_rpc_timeout_ms() : 5000; timeoutMs = timeoutMs * 3 / 4; boolean ret = GlobalStateMgr.getCurrentGlobalTransactionMgr().commitAndPublishTransaction( db, request.getTxnId(), TabletCommitInfo.fromThrift(request.getCommitInfos()), timeoutMs, attachment); if (!ret) { return ret; } MetricRepo.COUNTER_LOAD_FINISHED.increase(1L); if (null == attachment) { return ret; } Table tbl = db.getTable(request.getTbl()); if (null == tbl) { return ret; } TableMetricsEntity entity = TableMetricsRegistry.getInstance().getMetricsEntity(tbl.getId()); switch (request.txnCommitAttachment.getLoadType()) { case ROUTINE_LOAD: if (!(attachment instanceof RLTaskTxnCommitAttachment)) { break; } RLTaskTxnCommitAttachment routineAttachment = (RLTaskTxnCommitAttachment) attachment; entity.counterRoutineLoadFinishedTotal.increase(1L); entity.counterRoutineLoadBytesTotal.increase(routineAttachment.getReceivedBytes()); entity.counterRoutineLoadRowsTotal.increase(routineAttachment.getLoadedRows()); break; case MANUAL_LOAD: if (!(attachment instanceof ManualLoadTxnCommitAttachment)) { break; } ManualLoadTxnCommitAttachment streamAttachment = (ManualLoadTxnCommitAttachment) attachment; entity.counterStreamLoadFinishedTotal.increase(1L); entity.counterStreamLoadBytesTotal.increase(streamAttachment.getReceivedBytes()); entity.counterStreamLoadRowsTotal.increase(streamAttachment.getLoadedRows()); break; default: break; } return ret; } @Override public TLoadTxnCommitResult loadTxnPrepare(TLoadTxnCommitRequest request) throws TException { String clientAddr = getClientAddrAsString(); LOG.info("receive txn prepare request. 
db: {}, tbl: {}, txn_id: {}, backend: {}", request.getDb(), request.getTbl(), request.getTxnId(), clientAddr); LOG.debug("txn prepare request: {}", request); TLoadTxnCommitResult result = new TLoadTxnCommitResult(); if (!GlobalStateMgr.getCurrentState().isLeader()) { TStatus status = new TStatus(TStatusCode.INTERNAL_ERROR); status.setError_msgs(Lists.newArrayList("current fe is not master")); result.setStatus(status); return result; } TStatus status = new TStatus(TStatusCode.OK); result.setStatus(status); try { loadTxnPrepareImpl(request); } catch (UserException e) { LOG.warn("failed to prepare txn_id: {}: {}", request.getTxnId(), e.getMessage()); status.setStatus_code(TStatusCode.ANALYSIS_ERROR); status.addToError_msgs(e.getMessage()); } catch (Throwable e) { LOG.warn("catch unknown result.", e); status.setStatus_code(TStatusCode.INTERNAL_ERROR); status.addToError_msgs(Strings.nullToEmpty(e.getMessage())); return result; } return result; } private void loadTxnPrepareImpl(TLoadTxnCommitRequest request) throws UserException { String cluster = request.getCluster(); if (Strings.isNullOrEmpty(cluster)) { cluster = SystemInfoService.DEFAULT_CLUSTER; } if (request.isSetAuth_code()) { } else { checkPasswordAndPrivs(cluster, request.getUser(), request.getPasswd(), request.getDb(), request.getTbl(), request.getUser_ip(), PrivPredicate.LOAD); } GlobalStateMgr globalStateMgr = GlobalStateMgr.getCurrentState(); String dbName = request.getDb(); Database db = globalStateMgr.getDb(dbName); if (db == null) { throw new UserException("unknown database, database=" + dbName); } TxnCommitAttachment attachment = TxnCommitAttachment.fromThrift(request.txnCommitAttachment); GlobalStateMgr.getCurrentGlobalTransactionMgr().prepareTransaction( db.getId(), request.getTxnId(), TabletCommitInfo.fromThrift(request.getCommitInfos()), attachment); } @Override public TLoadTxnRollbackResult loadTxnRollback(TLoadTxnRollbackRequest request) throws TException { String clientAddr = getClientAddrAsString(); LOG.info("receive txn rollback request. 
db: {}, tbl: {}, txn_id: {}, reason: {}, backend: {}", request.getDb(), request.getTbl(), request.getTxnId(), request.getReason(), clientAddr); LOG.debug("txn rollback request: {}", request); TLoadTxnRollbackResult result = new TLoadTxnRollbackResult(); if (!GlobalStateMgr.getCurrentState().isLeader()) { TStatus status = new TStatus(TStatusCode.INTERNAL_ERROR); status.setError_msgs(Lists.newArrayList("current fe is not master")); result.setStatus(status); return result; } TStatus status = new TStatus(TStatusCode.OK); result.setStatus(status); try { loadTxnRollbackImpl(request); } catch (TransactionNotFoundException e) { LOG.warn("failed to rollback txn {}: {}", request.getTxnId(), e.getMessage()); status.setStatus_code(TStatusCode.TXN_NOT_EXISTS); status.addToError_msgs(e.getMessage()); } catch (UserException e) { LOG.warn("failed to rollback txn {}: {}", request.getTxnId(), e.getMessage()); status.setStatus_code(TStatusCode.ANALYSIS_ERROR); status.addToError_msgs(e.getMessage()); } catch (Throwable e) { LOG.warn("catch unknown result.", e); status.setStatus_code(TStatusCode.INTERNAL_ERROR); status.addToError_msgs(Strings.nullToEmpty(e.getMessage())); return result; } return result; } private void loadTxnRollbackImpl(TLoadTxnRollbackRequest request) throws UserException { String cluster = request.getCluster(); if (Strings.isNullOrEmpty(cluster)) { cluster = SystemInfoService.DEFAULT_CLUSTER; } if (request.isSetAuth_code()) { } else { checkPasswordAndPrivs(cluster, request.getUser(), request.getPasswd(), request.getDb(), request.getTbl(), request.getUser_ip(), PrivPredicate.LOAD); } String dbName = request.getDb(); Database db = GlobalStateMgr.getCurrentState().getDb(dbName); if (db == null) { throw new MetaNotFoundException("db " + dbName + " does not exist"); } long dbId = db.getId(); GlobalStateMgr.getCurrentGlobalTransactionMgr().abortTransaction(dbId, request.getTxnId(), request.isSetReason() ? request.getReason() : "system cancel", TxnCommitAttachment.fromThrift(request.getTxnCommitAttachment())); } @Override public TStreamLoadPutResult streamLoadPut(TStreamLoadPutRequest request) { String clientAddr = getClientAddrAsString(); LOG.info("receive stream load put request. db:{}, tbl: {}, txn_id: {}, load id: {}, backend: {}", request.getDb(), request.getTbl(), request.getTxnId(), DebugUtil.printId(request.getLoadId()), clientAddr); LOG.debug("stream load put request: {}", request); TStreamLoadPutResult result = new TStreamLoadPutResult(); TStatus status = new TStatus(TStatusCode.OK); result.setStatus(status); try { result.setParams(streamLoadPutImpl(request)); } catch (UserException e) { LOG.warn("failed to get stream load plan: {}", e.getMessage()); status.setStatus_code(TStatusCode.ANALYSIS_ERROR); status.addToError_msgs(e.getMessage()); } catch (Throwable e) { LOG.warn("catch unknown result.", e); status.setStatus_code(TStatusCode.INTERNAL_ERROR); status.addToError_msgs(Strings.nullToEmpty(e.getMessage())); return result; } return result; } private TExecPlanFragmentParams streamLoadPutImpl(TStreamLoadPutRequest request) throws UserException { String cluster = request.getCluster(); if (Strings.isNullOrEmpty(cluster)) { cluster = SystemInfoService.DEFAULT_CLUSTER; } GlobalStateMgr globalStateMgr = GlobalStateMgr.getCurrentState(); String dbName = request.getDb(); Database db = globalStateMgr.getDb(dbName); if (db == null) { throw new UserException("unknown database, database=" + dbName); } long timeoutMs = request.isSetThrift_rpc_timeout_ms() ? 
request.getThrift_rpc_timeout_ms() : 5000; if (!db.tryReadLock(timeoutMs, TimeUnit.MILLISECONDS)) { throw new UserException("get database read lock timeout, database=" + dbName); } try { Table table = db.getTable(request.getTbl()); if (table == null) { throw new UserException("unknown table, table=" + request.getTbl()); } if (!(table instanceof OlapTable)) { throw new UserException("load table type is not OlapTable, type=" + table.getClass()); } if (table instanceof MaterializedView) { throw new UserException(String.format( "The data of '%s' cannot be inserted because '%s' is a materialized view," + "and the data of materialized view must be consistent with the base table.", table.getName(), table.getName())); } StreamLoadTask streamLoadTask = StreamLoadTask.fromTStreamLoadPutRequest(request, db); StreamLoadPlanner planner = new StreamLoadPlanner(db, (OlapTable) table, streamLoadTask); TExecPlanFragmentParams plan = planner.plan(streamLoadTask.getId()); TransactionState txnState = GlobalStateMgr.getCurrentGlobalTransactionMgr().getTransactionState(db.getId(), request.getTxnId()); if (txnState == null) { throw new UserException("txn does not exist: " + request.getTxnId()); } txnState.addTableIndexes((OlapTable) table); return plan; } finally { db.readUnlock(); } } @Override public TStatus snapshotLoaderReport(TSnapshotLoaderReportRequest request) throws TException { if (GlobalStateMgr.getCurrentState().getBackupHandler().report(request.getTask_type(), request.getJob_id(), request.getTask_id(), request.getFinished_num(), request.getTotal_num())) { return new TStatus(TStatusCode.OK); } return new TStatus(TStatusCode.CANCELLED); } @Override public TRefreshTableResponse refreshTable(TRefreshTableRequest request) throws TException { try { if (request.getCatalog_name() == null) { request.setCatalog_name(InternalCatalog.DEFAULT_INTERNAL_CATALOG_NAME); } GlobalStateMgr.getCurrentState().refreshExternalTable(new TableName(request.getCatalog_name(), request.getDb_name(), request.getTable_name()), request.getPartitions()); return new TRefreshTableResponse(new TStatus(TStatusCode.OK)); } catch (DdlException e) { TStatus status = new TStatus(TStatusCode.INTERNAL_ERROR); status.setError_msgs(Lists.newArrayList(e.getMessage())); return new TRefreshTableResponse(status); } } private TNetworkAddress getClientAddr() { ThriftServerContext connectionContext = ThriftServerEventProcessor.getConnectionContext(); if (connectionContext != null) { return connectionContext.getClient(); } return null; } private String getClientAddrAsString() { TNetworkAddress addr = getClientAddr(); return addr == null ? 
"unknown" : addr.hostname; } @Override public TGetTableMetaResponse getTableMeta(TGetTableMetaRequest request) throws TException { return leaderImpl.getTableMeta(request); } @Override public TBeginRemoteTxnResponse beginRemoteTxn(TBeginRemoteTxnRequest request) throws TException { return leaderImpl.beginRemoteTxn(request); } @Override public TCommitRemoteTxnResponse commitRemoteTxn(TCommitRemoteTxnRequest request) throws TException { return leaderImpl.commitRemoteTxn(request); } @Override public TAbortRemoteTxnResponse abortRemoteTxn(TAbortRemoteTxnRequest request) throws TException { return leaderImpl.abortRemoteTxn(request); } @Override public TSetConfigResponse setConfig(TSetConfigRequest request) throws TException { try { Preconditions.checkState(request.getKeys().size() == request.getValues().size()); Map<String, String> configs = new HashMap<>(); for (int i = 0; i < request.getKeys().size(); i++) { configs.put(request.getKeys().get(i), request.getValues().get(i)); } GlobalStateMgr.getCurrentState().setFrontendConfig(configs); return new TSetConfigResponse(new TStatus(TStatusCode.OK)); } catch (DdlException e) { TStatus status = new TStatus(TStatusCode.INTERNAL_ERROR); status.setError_msgs(Lists.newArrayList(e.getMessage())); return new TSetConfigResponse(status); } } }
class FrontendServiceImpl implements FrontendService.Iface { private static final Logger LOG = LogManager.getLogger(LeaderImpl.class); private LeaderImpl leaderImpl; private ExecuteEnv exeEnv; public FrontendServiceImpl(ExecuteEnv exeEnv) { leaderImpl = new LeaderImpl(); this.exeEnv = exeEnv; } @Override public TGetDbsResult getDbNames(TGetDbsParams params) throws TException { LOG.debug("get db request: {}", params); TGetDbsResult result = new TGetDbsResult(); List<String> dbs = Lists.newArrayList(); PatternMatcher matcher = null; if (params.isSetPattern()) { try { matcher = PatternMatcher.createMysqlPattern(params.getPattern(), CaseSensibility.DATABASE.getCaseSensibility()); } catch (AnalysisException e) { throw new TException("Pattern is in bad format: " + params.getPattern()); } } GlobalStateMgr globalStateMgr = GlobalStateMgr.getCurrentState(); List<String> dbNames = globalStateMgr.getDbNames(); LOG.debug("get db names: {}", dbNames); UserIdentity currentUser = null; if (params.isSetCurrent_user_ident()) { currentUser = UserIdentity.fromThrift(params.current_user_ident); } else { currentUser = UserIdentity.createAnalyzedUserIdentWithIp(params.user, params.user_ip); } for (String fullName : dbNames) { if (!globalStateMgr.getAuth().checkDbPriv(currentUser, fullName, PrivPredicate.SHOW)) { continue; } final String db = ClusterNamespace.getNameFromFullName(fullName); if (matcher != null && !matcher.match(db)) { continue; } dbs.add(fullName); } result.setDbs(dbs); return result; } @Override public TGetTablesResult getTableNames(TGetTablesParams params) throws TException { LOG.debug("get table name request: {}", params); TGetTablesResult result = new TGetTablesResult(); List<String> tablesResult = Lists.newArrayList(); result.setTables(tablesResult); PatternMatcher matcher = null; if (params.isSetPattern()) { try { matcher = PatternMatcher.createMysqlPattern(params.getPattern(), CaseSensibility.TABLE.getCaseSensibility()); } catch (AnalysisException e) { throw new TException("Pattern is in bad format: " + params.getPattern()); } } Database db = GlobalStateMgr.getCurrentState().getDb(params.db); UserIdentity currentUser = null; if (params.isSetCurrent_user_ident()) { currentUser = UserIdentity.fromThrift(params.current_user_ident); } else { currentUser = UserIdentity.createAnalyzedUserIdentWithIp(params.user, params.user_ip); } if (db != null) { for (String tableName : db.getTableNamesWithLock()) { LOG.debug("get table: {}, wait to check", tableName); if (!GlobalStateMgr.getCurrentState().getAuth().checkTblPriv(currentUser, params.db, tableName, PrivPredicate.SHOW)) { continue; } if (matcher != null && !matcher.match(tableName)) { continue; } tablesResult.add(tableName); } } return result; } @Override public TListTableStatusResult listTableStatus(TGetTablesParams params) throws TException { LOG.debug("get list table request: {}", params); TListTableStatusResult result = new TListTableStatusResult(); List<TTableStatus> tablesResult = Lists.newArrayList(); result.setTables(tablesResult); PatternMatcher matcher = null; if (params.isSetPattern()) { try { matcher = PatternMatcher.createMysqlPattern(params.getPattern(), CaseSensibility.TABLE.getCaseSensibility()); } catch (AnalysisException e) { throw new TException("Pattern is in bad format " + params.getPattern()); } } Database db = GlobalStateMgr.getCurrentState().getDb(params.db); long limit = params.isSetLimit() ? 
params.getLimit() : -1; UserIdentity currentUser = null; if (params.isSetCurrent_user_ident()) { currentUser = UserIdentity.fromThrift(params.current_user_ident); } else { currentUser = UserIdentity.createAnalyzedUserIdentWithIp(params.user, params.user_ip); } if (params.isSetType() && TTableType.MATERIALIZED_VIEW.equals(params.getType())) { listMaterializedViewStatus(tablesResult, limit, matcher, currentUser, params.db); return result; } if (db != null) { db.readLock(); try { boolean listingViews = params.isSetType() && TTableType.VIEW.equals(params.getType()); List<Table> tables = listingViews ? db.getViews() : db.getTables(); for (Table table : tables) { if (!GlobalStateMgr.getCurrentState().getAuth().checkTblPriv(currentUser, params.db, table.getName(), PrivPredicate.SHOW)) { continue; } if (matcher != null && !matcher.match(table.getName())) { continue; } TTableStatus status = new TTableStatus(); status.setName(table.getName()); status.setType(table.getMysqlType()); status.setEngine(table.getEngine()); status.setComment(table.getComment()); status.setCreate_time(table.getCreateTime()); status.setLast_check_time(table.getLastCheckTime()); if (listingViews) { View view = (View) table; String ddlSql = view.getInlineViewDef(); List<TableRef> tblRefs = new ArrayList<>(); view.getQueryStmt().collectTableRefs(tblRefs); for (TableRef tblRef : tblRefs) { if (!GlobalStateMgr.getCurrentState().getAuth() .checkTblPriv(currentUser, tblRef.getName().getDb(), tblRef.getName().getTbl(), PrivPredicate.SHOW)) { ddlSql = ""; break; } } status.setDdl_sql(ddlSql); } tablesResult.add(status); if (limit > 0 && tablesResult.size() >= limit) { break; } } } finally { db.readUnlock(); } } return result; } public void listMaterializedViewStatus(List<TTableStatus> tablesResult, long limit, PatternMatcher matcher, UserIdentity currentUser, String dbName) { Database db = GlobalStateMgr.getCurrentState().getDb(dbName); if (db == null) { LOG.warn("database not exists: {}", dbName); return; } db.readLock(); try { for (Table materializedView : db.getMaterializedViews()) { if (!GlobalStateMgr.getCurrentState().getAuth().checkTblPriv(currentUser, dbName, materializedView.getName(), PrivPredicate.SHOW)) { continue; } if (matcher != null && !matcher.match(materializedView.getName())) { continue; } MaterializedView mvTable = (MaterializedView) materializedView; List<String> createTableStmt = Lists.newArrayList(); GlobalStateMgr.getDdlStmt(mvTable, createTableStmt, null, null, false, true); String ddlSql = createTableStmt.get(0); TTableStatus status = new TTableStatus(); status.setId(String.valueOf(mvTable.getId())); status.setName(mvTable.getName()); status.setDdl_sql(ddlSql); status.setRows(String.valueOf(mvTable.getRowCount())); status.setType(mvTable.getMysqlType()); status.setComment(mvTable.getComment()); tablesResult.add(status); if (limit > 0 && tablesResult.size() >= limit) { return; } } for (Table table : db.getTables()) { if (table.getType() == Table.TableType.OLAP) { OlapTable olapTable = (OlapTable) table; List<MaterializedIndex> visibleMaterializedViews = olapTable.getVisibleIndex(); long baseIdx = olapTable.getBaseIndexId(); for (MaterializedIndex mvIdx : visibleMaterializedViews) { if (baseIdx == mvIdx.getId()) { continue; } if (matcher != null && !matcher.match(olapTable.getIndexNameById(mvIdx.getId()))) { continue; } MaterializedIndexMeta mvMeta = olapTable.getVisibleIndexIdToMeta().get(mvIdx.getId()); TTableStatus status = new TTableStatus(); status.setId(String.valueOf(mvIdx.getId())); 
status.setName(olapTable.getIndexNameById(mvIdx.getId())); if (mvMeta.getOriginStmt() == null) { StringBuilder originStmtBuilder = new StringBuilder( "create materialized view " + olapTable.getIndexNameById(mvIdx.getId()) + " as select "); String groupByString = ""; for (Column column : mvMeta.getSchema()) { if (column.isKey()) { groupByString += column.getName() + ","; } } originStmtBuilder.append(groupByString); for (Column column : mvMeta.getSchema()) { if (!column.isKey()) { originStmtBuilder.append(column.getAggregationType().toString()).append("(") .append(column.getName()).append(")").append(","); } } originStmtBuilder.delete(originStmtBuilder.length() - 1, originStmtBuilder.length()); originStmtBuilder.append(" from ").append(olapTable.getName()).append(" group by ") .append(groupByString); originStmtBuilder.delete(originStmtBuilder.length() - 1, originStmtBuilder.length()); status.setDdl_sql(originStmtBuilder.toString()); } else { status.setDdl_sql(mvMeta.getOriginStmt().replace("\n", "").replace("\t", "") .replaceAll("[ ]+", " ")); } status.setRows(String.valueOf(mvIdx.getRowCount())); status.setType(""); status.setComment(""); tablesResult.add(status); if (limit > 0 && tablesResult.size() >= limit) { return; } } } } } finally { db.readUnlock(); } } @Override public TGetTaskInfoResult getTasks(TGetTasksParams params) throws TException { LOG.debug("get show task request: {}", params); TGetTaskInfoResult result = new TGetTaskInfoResult(); List<TTaskInfo> tasksResult = Lists.newArrayList(); result.setTasks(tasksResult); UserIdentity currentUser = null; if (params.isSetCurrent_user_ident()) { currentUser = UserIdentity.fromThrift(params.current_user_ident); } GlobalStateMgr globalStateMgr = GlobalStateMgr.getCurrentState(); TaskManager taskManager = globalStateMgr.getTaskManager(); List<Task> taskList = taskManager.showTasks(null); for (Task task : taskList) { if (!globalStateMgr.getAuth().checkDbPriv(currentUser, task.getDbName(), PrivPredicate.SHOW)) { continue; } TTaskInfo info = new TTaskInfo(); info.setTask_name(task.getName()); info.setCreate_time(task.getCreateTime() / 1000); String scheduleStr = task.getType().name(); if (task.getType() == Constants.TaskType.PERIODICAL) { scheduleStr += task.getSchedule(); } info.setSchedule(scheduleStr); info.setDatabase(ClusterNamespace.getNameFromFullName(task.getDbName())); info.setDefinition(task.getDefinition()); info.setExpire_time(task.getExpireTime() / 1000); tasksResult.add(info); } return result; } @Override public TGetTaskRunInfoResult getTaskRuns(TGetTasksParams params) throws TException { LOG.debug("get show task run request: {}", params); TGetTaskRunInfoResult result = new TGetTaskRunInfoResult(); List<TTaskRunInfo> tasksResult = Lists.newArrayList(); result.setTask_runs(tasksResult); UserIdentity currentUser = null; if (params.isSetCurrent_user_ident()) { currentUser = UserIdentity.fromThrift(params.current_user_ident); } GlobalStateMgr globalStateMgr = GlobalStateMgr.getCurrentState(); TaskManager taskManager = globalStateMgr.getTaskManager(); List<TaskRunStatus> taskRunList = taskManager.showTaskRunStatus(null); for (TaskRunStatus status : taskRunList) { if (!globalStateMgr.getAuth().checkDbPriv(currentUser, status.getDbName(), PrivPredicate.SHOW)) { continue; } TTaskRunInfo info = new TTaskRunInfo(); info.setQuery_id(status.getQueryId()); info.setTask_name(status.getTaskName()); info.setCreate_time(status.getCreateTime() / 1000); info.setFinish_time(status.getFinishTime() / 1000); 
info.setState(status.getState().toString()); info.setDatabase(ClusterNamespace.getNameFromFullName(status.getDbName())); info.setDefinition(status.getDefinition()); info.setError_code(status.getErrorCode()); info.setError_message(status.getErrorMessage()); info.setExpire_time(status.getExpireTime() / 1000); tasksResult.add(info); } return result; } @Override public TGetTablePrivsResult getTablePrivs(TGetTablePrivsParams params) throws TException { LOG.debug("get table privileges request: {}", params); TGetTablePrivsResult result = new TGetTablePrivsResult(); List<TTablePrivDesc> tTablePrivs = Lists.newArrayList(); result.setTable_privs(tTablePrivs); UserIdentity currentUser = UserIdentity.fromThrift(params.current_user_ident); List<TablePrivEntry> tablePrivEntries = GlobalStateMgr.getCurrentState().getAuth().getTablePrivEntries(currentUser); for (TablePrivEntry entry : tablePrivEntries) { PrivBitSet savedPrivs = entry.getPrivSet(); String clusterPrefix = SystemInfoService.DEFAULT_CLUSTER + ClusterNamespace.CLUSTER_DELIMITER; String userIdentStr = currentUser.toString().replace(clusterPrefix, ""); String dbName = entry.getOrigDb(); boolean isGrantable = savedPrivs.satisfy(PrivPredicate.GRANT); List<TTablePrivDesc> tPrivs = savedPrivs.toPrivilegeList().stream().map( priv -> { TTablePrivDesc privDesc = new TTablePrivDesc(); privDesc.setDb_name(dbName); privDesc.setTable_name(entry.getOrigTbl()); privDesc.setIs_grantable(isGrantable); privDesc.setUser_ident_str(userIdentStr); privDesc.setPriv(priv.getUpperNameForMysql()); return privDesc; } ).collect(Collectors.toList()); if (savedPrivs.satisfy(PrivPredicate.LOAD)) { tPrivs.addAll(Lists.newArrayList("INSERT", "UPDATE", "DELETE").stream().map(priv -> { TTablePrivDesc privDesc = new TTablePrivDesc(); privDesc.setDb_name(dbName); privDesc.setTable_name(entry.getOrigTbl()); privDesc.setIs_grantable(isGrantable); privDesc.setUser_ident_str(userIdentStr); privDesc.setPriv(priv); return privDesc; }).collect(Collectors.toList())); } tTablePrivs.addAll(tPrivs); } return result; } @Override public TGetUserPrivsResult getUserPrivs(TGetUserPrivsParams params) throws TException { LOG.debug("get user privileges request: {}", params); TGetUserPrivsResult result = new TGetUserPrivsResult(); List<TUserPrivDesc> tUserPrivs = Lists.newArrayList(); result.setUser_privs(tUserPrivs); UserIdentity currentUser = UserIdentity.fromThrift(params.current_user_ident); Auth currAuth = GlobalStateMgr.getCurrentState().getAuth(); UserPrivTable userPrivTable = currAuth.getUserPrivTable(); List<UserIdentity> userIdents = Lists.newArrayList(); userIdents.add(currentUser); for (UserIdentity userIdent : userIdents) { PrivBitSet savedPrivs = new PrivBitSet(); userPrivTable.getPrivs(userIdent, savedPrivs); String clusterPrefix = SystemInfoService.DEFAULT_CLUSTER + ClusterNamespace.CLUSTER_DELIMITER; String userIdentStr = currentUser.toString().replace(clusterPrefix, ""); List<TUserPrivDesc> tPrivs = savedPrivs.toPrivilegeList().stream().map( priv -> { boolean isGrantable = Privilege.NODE_PRIV != priv && userPrivTable.hasPriv(userIdent, PrivPredicate.GRANT); TUserPrivDesc privDesc = new TUserPrivDesc(); privDesc.setIs_grantable(isGrantable); privDesc.setUser_ident_str(userIdentStr); privDesc.setPriv(priv.getUpperNameForMysql()); return privDesc; } ).collect(Collectors.toList()); tUserPrivs.addAll(tPrivs); } return result; } @Override public TFeResult updateExportTaskStatus(TUpdateExportTaskStatusRequest request) throws TException { TStatus status = new
TStatus(TStatusCode.OK); TFeResult result = new TFeResult(FrontendServiceVersion.V1, status); return result; } @Override public TDescribeTableResult describeTable(TDescribeTableParams params) throws TException { LOG.debug("get desc table request: {}", params); TDescribeTableResult result = new TDescribeTableResult(); List<TColumnDef> columns = Lists.newArrayList(); result.setColumns(columns); UserIdentity currentUser = null; if (params.isSetCurrent_user_ident()) { currentUser = UserIdentity.fromThrift(params.current_user_ident); } else { currentUser = UserIdentity.createAnalyzedUserIdentWithIp(params.user, params.user_ip); } long limit = params.isSetLimit() ? params.getLimit() : -1; if (!params.isSetDb() && StringUtils.isBlank(params.getTable_name())) { describeWithoutDbAndTable(currentUser, columns, limit); return result; } if (!GlobalStateMgr.getCurrentState().getAuth().checkTblPriv(currentUser, params.db, params.getTable_name(), PrivPredicate.SHOW)) { return result; } Database db = GlobalStateMgr.getCurrentState().getDb(params.db); if (db != null) { db.readLock(); try { Table table = db.getTable(params.getTable_name()); setColumnDesc(columns, table, limit, false, params.db, params.getTable_name()); } finally { db.readUnlock(); } } return result; } private void describeWithoutDbAndTable(UserIdentity currentUser, List<TColumnDef> columns, long limit) { GlobalStateMgr globalStateMgr = GlobalStateMgr.getCurrentState(); List<String> dbNames = globalStateMgr.getDbNames(); boolean reachLimit; for (String fullName : dbNames) { if (!GlobalStateMgr.getCurrentState().getAuth().checkDbPriv(currentUser, fullName, PrivPredicate.SHOW)) { continue; } Database db = GlobalStateMgr.getCurrentState().getDb(fullName); if (db != null) { for (String tableName : db.getTableNamesWithLock()) { LOG.debug("get table: {}, wait to check", tableName); if (!GlobalStateMgr.getCurrentState().getAuth().checkTblPriv(currentUser, fullName, tableName, PrivPredicate.SHOW)) { continue; } db.readLock(); try { Table table = db.getTable(tableName); reachLimit = setColumnDesc(columns, table, limit, true, fullName, tableName); } finally { db.readUnlock(); } if (reachLimit) { return; } } } } } private boolean setColumnDesc(List<TColumnDef> columns, Table table, long limit, boolean needSetDbAndTable, String db, String tbl) { if (table != null) { String tableKeysType = ""; if (TableType.OLAP.equals(table.getType())) { OlapTable olapTable = (OlapTable) table; tableKeysType = olapTable.getKeysType().name().substring(0, 3).toUpperCase(); } for (Column column : table.getBaseSchema()) { final TColumnDesc desc = new TColumnDesc(column.getName(), column.getPrimitiveType().toThrift()); final Integer precision = column.getType().getPrecision(); if (precision != null) { desc.setColumnPrecision(precision); } final Integer columnLength = column.getType().getColumnSize(); if (columnLength != null) { desc.setColumnLength(columnLength); } final Integer decimalDigits = column.getType().getDecimalDigits(); if (decimalDigits != null) { desc.setColumnScale(decimalDigits); } if (column.isKey()) { desc.setColumnKey(tableKeysType); } else { desc.setColumnKey(""); } final TColumnDef colDef = new TColumnDef(desc); final String comment = column.getComment(); if (comment != null) { colDef.setComment(comment); } columns.add(colDef); if (needSetDbAndTable) { columns.get(columns.size() - 1).columnDesc.setDbName(db); columns.get(columns.size() - 1).columnDesc.setTableName(tbl); } if (limit > 0 && columns.size() >= limit) { return true; } } } return false; } 
@Override public TShowVariableResult showVariables(TShowVariableRequest params) throws TException { TShowVariableResult result = new TShowVariableResult(); Map<String, String> map = Maps.newHashMap(); result.setVariables(map); ConnectContext ctx = exeEnv.getScheduler().getContext(params.getThreadId()); if (ctx == null) { return result; } List<List<String>> rows = VariableMgr.dump(SetType.fromThrift(params.getVarType()), ctx.getSessionVariable(), null); for (List<String> row : rows) { map.put(row.get(0), row.get(1)); } return result; } @Override public TReportExecStatusResult reportExecStatus(TReportExecStatusParams params) throws TException { return QeProcessorImpl.INSTANCE.reportExecStatus(params, getClientAddr()); } @Override public TMasterResult finishTask(TFinishTaskRequest request) throws TException { return leaderImpl.finishTask(request); } @Override public TMasterResult report(TReportRequest request) throws TException { return leaderImpl.report(request); } @Override public TFetchResourceResult fetchResource() throws TException { throw new TException("not supported"); } @Override public TFeResult isMethodSupported(TIsMethodSupportedRequest request) throws TException { TStatus status = new TStatus(TStatusCode.OK); TFeResult result = new TFeResult(FrontendServiceVersion.V1, status); switch (request.getFunction_name()) { case "STREAMING_MINI_LOAD": break; default: status.setStatus_code(NOT_IMPLEMENTED_ERROR); break; } return result; } @Override public TMasterOpResult forward(TMasterOpRequest params) throws TException { TNetworkAddress clientAddr = getClientAddr(); if (clientAddr != null) { Frontend fe = GlobalStateMgr.getCurrentState().getFeByHost(clientAddr.getHostname()); if (fe == null) { LOG.warn("reject request from invalid host. client: {}", clientAddr); throw new TException("request from invalid host was rejected."); } } LOG.info("receive forwarded stmt {} from FE: {}", params.getStmt_id(), clientAddr.getHostname()); ConnectContext context = new ConnectContext(null); ConnectProcessor processor = new ConnectProcessor(context); TMasterOpResult result = processor.proxyExecute(params); ConnectContext.remove(); return result; } private void checkPasswordAndPrivs(String cluster, String user, String passwd, String db, String tbl, String clientIp, PrivPredicate predicate) throws AuthenticationException { final String fullUserName = ClusterNamespace.getFullName(user); List<UserIdentity> currentUser = Lists.newArrayList(); if (!GlobalStateMgr.getCurrentState().getAuth() .checkPlainPassword(fullUserName, clientIp, passwd, currentUser)) { throw new AuthenticationException("Access denied for " + fullUserName + "@" + clientIp); } Preconditions.checkState(currentUser.size() == 1); if (!GlobalStateMgr.getCurrentState().getAuth().checkTblPriv(currentUser.get(0), db, tbl, predicate)) { throw new AuthenticationException( "Access denied; you need (at least one of) the LOAD privilege(s) for this operation"); } } @Override public TLoadTxnBeginResult loadTxnBegin(TLoadTxnBeginRequest request) throws TException { String clientAddr = getClientAddrAsString(); LOG.info("receive txn begin request, db: {}, tbl: {}, label: {}, backend: {}", request.getDb(), request.getTbl(), request.getLabel(), clientAddr); LOG.debug("txn begin request: {}", request); TLoadTxnBeginResult result = new TLoadTxnBeginResult(); if (!GlobalStateMgr.getCurrentState().isLeader()) { TStatus status = new TStatus(TStatusCode.INTERNAL_ERROR); status.setError_msgs(Lists.newArrayList("current fe is not master")); result.setStatus(status); 
return result; } TStatus status = new TStatus(TStatusCode.OK); result.setStatus(status); try { result.setTxnId(loadTxnBeginImpl(request, clientAddr)); } catch (DuplicatedRequestException e) { LOG.info("duplicate request for stream load. request id: {}, txn_id: {}", e.getDuplicatedRequestId(), e.getTxnId()); result.setTxnId(e.getTxnId()); } catch (LabelAlreadyUsedException e) { status.setStatus_code(TStatusCode.LABEL_ALREADY_EXISTS); status.addToError_msgs(e.getMessage()); result.setJob_status(e.getJobStatus()); } catch (UserException e) { LOG.warn("failed to begin: {}", e.getMessage()); status.setStatus_code(TStatusCode.ANALYSIS_ERROR); status.addToError_msgs(e.getMessage()); } catch (Throwable e) { LOG.warn("catch unknown result.", e); status.setStatus_code(TStatusCode.INTERNAL_ERROR); status.addToError_msgs(Strings.nullToEmpty(e.getMessage())); return result; } return result; } private long loadTxnBeginImpl(TLoadTxnBeginRequest request, String clientIp) throws UserException { String cluster = request.getCluster(); if (Strings.isNullOrEmpty(cluster)) { cluster = SystemInfoService.DEFAULT_CLUSTER; } checkPasswordAndPrivs(cluster, request.getUser(), request.getPasswd(), request.getDb(), request.getTbl(), request.getUser_ip(), PrivPredicate.LOAD); if (Strings.isNullOrEmpty(request.getLabel())) { throw new UserException("empty label in begin request"); } GlobalStateMgr globalStateMgr = GlobalStateMgr.getCurrentState(); String dbName = request.getDb(); Database db = globalStateMgr.getDb(dbName); if (db == null) { throw new UserException("unknown database, database=" + dbName); } Table table = db.getTable(request.getTbl()); if (table == null) { throw new UserException("unknown table \"" + request.getDb() + "." + request.getTbl() + "\""); } long timeoutSecond = request.isSetTimeout() ? request.getTimeout() : Config.stream_load_default_timeout_second; MetricRepo.COUNTER_LOAD_ADD.increase(1L); return GlobalStateMgr.getCurrentGlobalTransactionMgr().beginTransaction( db.getId(), Lists.newArrayList(table.getId()), request.getLabel(), request.getRequest_id(), new TxnCoordinator(TxnSourceType.BE, clientIp), TransactionState.LoadJobSourceType.BACKEND_STREAMING, -1, timeoutSecond); } @Override public TLoadTxnCommitResult loadTxnCommit(TLoadTxnCommitRequest request) throws TException { String clientAddr = getClientAddrAsString(); LOG.info("receive txn commit request. db: {}, tbl: {}, txn_id: {}, backend: {}", request.getDb(), request.getTbl(), request.getTxnId(), clientAddr); LOG.debug("txn commit request: {}", request); TLoadTxnCommitResult result = new TLoadTxnCommitResult(); if (!GlobalStateMgr.getCurrentState().isLeader()) { TStatus status = new TStatus(TStatusCode.INTERNAL_ERROR); status.setError_msgs(Lists.newArrayList("current fe is not master")); result.setStatus(status); return result; } TStatus status = new TStatus(TStatusCode.OK); result.setStatus(status); try { if (!loadTxnCommitImpl(request)) { status.setStatus_code(TStatusCode.PUBLISH_TIMEOUT); status.addToError_msgs("Publish timeout. 
The data will be visible after a while"); } } catch (UserException e) { LOG.warn("failed to commit txn_id: {}: {}", request.getTxnId(), e.getMessage()); status.setStatus_code(TStatusCode.ANALYSIS_ERROR); status.addToError_msgs(e.getMessage()); } catch (Throwable e) { LOG.warn("catch unknown result.", e); status.setStatus_code(TStatusCode.INTERNAL_ERROR); status.addToError_msgs(Strings.nullToEmpty(e.getMessage())); return result; } return result; } private boolean loadTxnCommitImpl(TLoadTxnCommitRequest request) throws UserException { String cluster = request.getCluster(); if (Strings.isNullOrEmpty(cluster)) { cluster = SystemInfoService.DEFAULT_CLUSTER; } if (request.isSetAuth_code()) { } else { checkPasswordAndPrivs(cluster, request.getUser(), request.getPasswd(), request.getDb(), request.getTbl(), request.getUser_ip(), PrivPredicate.LOAD); } GlobalStateMgr globalStateMgr = GlobalStateMgr.getCurrentState(); String dbName = request.getDb(); Database db = globalStateMgr.getDb(dbName); if (db == null) { throw new UserException("unknown database, database=" + dbName); } TxnCommitAttachment attachment = TxnCommitAttachment.fromThrift(request.txnCommitAttachment); long timeoutMs = request.isSetThrift_rpc_timeout_ms() ? request.getThrift_rpc_timeout_ms() : 5000; timeoutMs = timeoutMs * 3 / 4; boolean ret = GlobalStateMgr.getCurrentGlobalTransactionMgr().commitAndPublishTransaction( db, request.getTxnId(), TabletCommitInfo.fromThrift(request.getCommitInfos()), timeoutMs, attachment); if (!ret) { return ret; } MetricRepo.COUNTER_LOAD_FINISHED.increase(1L); if (null == attachment) { return ret; } Table tbl = db.getTable(request.getTbl()); if (null == tbl) { return ret; } TableMetricsEntity entity = TableMetricsRegistry.getInstance().getMetricsEntity(tbl.getId()); switch (request.txnCommitAttachment.getLoadType()) { case ROUTINE_LOAD: if (!(attachment instanceof RLTaskTxnCommitAttachment)) { break; } RLTaskTxnCommitAttachment routineAttachment = (RLTaskTxnCommitAttachment) attachment; entity.counterRoutineLoadFinishedTotal.increase(1L); entity.counterRoutineLoadBytesTotal.increase(routineAttachment.getReceivedBytes()); entity.counterRoutineLoadRowsTotal.increase(routineAttachment.getLoadedRows()); break; case MANUAL_LOAD: if (!(attachment instanceof ManualLoadTxnCommitAttachment)) { break; } ManualLoadTxnCommitAttachment streamAttachment = (ManualLoadTxnCommitAttachment) attachment; entity.counterStreamLoadFinishedTotal.increase(1L); entity.counterStreamLoadBytesTotal.increase(streamAttachment.getReceivedBytes()); entity.counterStreamLoadRowsTotal.increase(streamAttachment.getLoadedRows()); break; default: break; } return ret; } @Override public TLoadTxnCommitResult loadTxnPrepare(TLoadTxnCommitRequest request) throws TException { String clientAddr = getClientAddrAsString(); LOG.info("receive txn prepare request. 
db: {}, tbl: {}, txn_id: {}, backend: {}", request.getDb(), request.getTbl(), request.getTxnId(), clientAddr); LOG.debug("txn prepare request: {}", request); TLoadTxnCommitResult result = new TLoadTxnCommitResult(); if (!GlobalStateMgr.getCurrentState().isLeader()) { TStatus status = new TStatus(TStatusCode.INTERNAL_ERROR); status.setError_msgs(Lists.newArrayList("current fe is not master")); result.setStatus(status); return result; } TStatus status = new TStatus(TStatusCode.OK); result.setStatus(status); try { loadTxnPrepareImpl(request); } catch (UserException e) { LOG.warn("failed to prepare txn_id: {}: {}", request.getTxnId(), e.getMessage()); status.setStatus_code(TStatusCode.ANALYSIS_ERROR); status.addToError_msgs(e.getMessage()); } catch (Throwable e) { LOG.warn("catch unknown result.", e); status.setStatus_code(TStatusCode.INTERNAL_ERROR); status.addToError_msgs(Strings.nullToEmpty(e.getMessage())); return result; } return result; } private void loadTxnPrepareImpl(TLoadTxnCommitRequest request) throws UserException { String cluster = request.getCluster(); if (Strings.isNullOrEmpty(cluster)) { cluster = SystemInfoService.DEFAULT_CLUSTER; } if (request.isSetAuth_code()) { } else { checkPasswordAndPrivs(cluster, request.getUser(), request.getPasswd(), request.getDb(), request.getTbl(), request.getUser_ip(), PrivPredicate.LOAD); } GlobalStateMgr globalStateMgr = GlobalStateMgr.getCurrentState(); String dbName = request.getDb(); Database db = globalStateMgr.getDb(dbName); if (db == null) { throw new UserException("unknown database, database=" + dbName); } TxnCommitAttachment attachment = TxnCommitAttachment.fromThrift(request.txnCommitAttachment); GlobalStateMgr.getCurrentGlobalTransactionMgr().prepareTransaction( db.getId(), request.getTxnId(), TabletCommitInfo.fromThrift(request.getCommitInfos()), attachment); } @Override public TLoadTxnRollbackResult loadTxnRollback(TLoadTxnRollbackRequest request) throws TException { String clientAddr = getClientAddrAsString(); LOG.info("receive txn rollback request. 
db: {}, tbl: {}, txn_id: {}, reason: {}, backend: {}", request.getDb(), request.getTbl(), request.getTxnId(), request.getReason(), clientAddr); LOG.debug("txn rollback request: {}", request); TLoadTxnRollbackResult result = new TLoadTxnRollbackResult(); if (!GlobalStateMgr.getCurrentState().isLeader()) { TStatus status = new TStatus(TStatusCode.INTERNAL_ERROR); status.setError_msgs(Lists.newArrayList("current fe is not master")); result.setStatus(status); return result; } TStatus status = new TStatus(TStatusCode.OK); result.setStatus(status); try { loadTxnRollbackImpl(request); } catch (TransactionNotFoundException e) { LOG.warn("failed to rollback txn {}: {}", request.getTxnId(), e.getMessage()); status.setStatus_code(TStatusCode.TXN_NOT_EXISTS); status.addToError_msgs(e.getMessage()); } catch (UserException e) { LOG.warn("failed to rollback txn {}: {}", request.getTxnId(), e.getMessage()); status.setStatus_code(TStatusCode.ANALYSIS_ERROR); status.addToError_msgs(e.getMessage()); } catch (Throwable e) { LOG.warn("catch unknown result.", e); status.setStatus_code(TStatusCode.INTERNAL_ERROR); status.addToError_msgs(Strings.nullToEmpty(e.getMessage())); return result; } return result; } private void loadTxnRollbackImpl(TLoadTxnRollbackRequest request) throws UserException { String cluster = request.getCluster(); if (Strings.isNullOrEmpty(cluster)) { cluster = SystemInfoService.DEFAULT_CLUSTER; } if (request.isSetAuth_code()) { } else { checkPasswordAndPrivs(cluster, request.getUser(), request.getPasswd(), request.getDb(), request.getTbl(), request.getUser_ip(), PrivPredicate.LOAD); } String dbName = request.getDb(); Database db = GlobalStateMgr.getCurrentState().getDb(dbName); if (db == null) { throw new MetaNotFoundException("db " + dbName + " does not exist"); } long dbId = db.getId(); GlobalStateMgr.getCurrentGlobalTransactionMgr().abortTransaction(dbId, request.getTxnId(), request.isSetReason() ? request.getReason() : "system cancel", TxnCommitAttachment.fromThrift(request.getTxnCommitAttachment())); } @Override public TStreamLoadPutResult streamLoadPut(TStreamLoadPutRequest request) { String clientAddr = getClientAddrAsString(); LOG.info("receive stream load put request. db:{}, tbl: {}, txn_id: {}, load id: {}, backend: {}", request.getDb(), request.getTbl(), request.getTxnId(), DebugUtil.printId(request.getLoadId()), clientAddr); LOG.debug("stream load put request: {}", request); TStreamLoadPutResult result = new TStreamLoadPutResult(); TStatus status = new TStatus(TStatusCode.OK); result.setStatus(status); try { result.setParams(streamLoadPutImpl(request)); } catch (UserException e) { LOG.warn("failed to get stream load plan: {}", e.getMessage()); status.setStatus_code(TStatusCode.ANALYSIS_ERROR); status.addToError_msgs(e.getMessage()); } catch (Throwable e) { LOG.warn("catch unknown result.", e); status.setStatus_code(TStatusCode.INTERNAL_ERROR); status.addToError_msgs(Strings.nullToEmpty(e.getMessage())); return result; } return result; } private TExecPlanFragmentParams streamLoadPutImpl(TStreamLoadPutRequest request) throws UserException { String cluster = request.getCluster(); if (Strings.isNullOrEmpty(cluster)) { cluster = SystemInfoService.DEFAULT_CLUSTER; } GlobalStateMgr globalStateMgr = GlobalStateMgr.getCurrentState(); String dbName = request.getDb(); Database db = globalStateMgr.getDb(dbName); if (db == null) { throw new UserException("unknown database, database=" + dbName); } long timeoutMs = request.isSetThrift_rpc_timeout_ms() ? 
request.getThrift_rpc_timeout_ms() : 5000; if (!db.tryReadLock(timeoutMs, TimeUnit.MILLISECONDS)) { throw new UserException("get database read lock timeout, database=" + dbName); } try { Table table = db.getTable(request.getTbl()); if (table == null) { throw new UserException("unknown table, table=" + request.getTbl()); } if (!(table instanceof OlapTable)) { throw new UserException("load table type is not OlapTable, type=" + table.getClass()); } if (table instanceof MaterializedView) { throw new UserException(String.format( "The data of '%s' cannot be inserted because '%s' is a materialized view," + "and the data of materialized view must be consistent with the base table.", table.getName(), table.getName())); } StreamLoadTask streamLoadTask = StreamLoadTask.fromTStreamLoadPutRequest(request, db); StreamLoadPlanner planner = new StreamLoadPlanner(db, (OlapTable) table, streamLoadTask); TExecPlanFragmentParams plan = planner.plan(streamLoadTask.getId()); TransactionState txnState = GlobalStateMgr.getCurrentGlobalTransactionMgr().getTransactionState(db.getId(), request.getTxnId()); if (txnState == null) { throw new UserException("txn does not exist: " + request.getTxnId()); } txnState.addTableIndexes((OlapTable) table); return plan; } finally { db.readUnlock(); } } @Override public TStatus snapshotLoaderReport(TSnapshotLoaderReportRequest request) throws TException { if (GlobalStateMgr.getCurrentState().getBackupHandler().report(request.getTask_type(), request.getJob_id(), request.getTask_id(), request.getFinished_num(), request.getTotal_num())) { return new TStatus(TStatusCode.OK); } return new TStatus(TStatusCode.CANCELLED); } @Override public TRefreshTableResponse refreshTable(TRefreshTableRequest request) throws TException { try { if (request.getCatalog_name() == null) { request.setCatalog_name(InternalCatalog.DEFAULT_INTERNAL_CATALOG_NAME); } GlobalStateMgr.getCurrentState().refreshExternalTable(new TableName(request.getCatalog_name(), request.getDb_name(), request.getTable_name()), request.getPartitions()); return new TRefreshTableResponse(new TStatus(TStatusCode.OK)); } catch (DdlException e) { TStatus status = new TStatus(TStatusCode.INTERNAL_ERROR); status.setError_msgs(Lists.newArrayList(e.getMessage())); return new TRefreshTableResponse(status); } } private TNetworkAddress getClientAddr() { ThriftServerContext connectionContext = ThriftServerEventProcessor.getConnectionContext(); if (connectionContext != null) { return connectionContext.getClient(); } return null; } private String getClientAddrAsString() { TNetworkAddress addr = getClientAddr(); return addr == null ? 
"unknown" : addr.hostname; } @Override public TGetTableMetaResponse getTableMeta(TGetTableMetaRequest request) throws TException { return leaderImpl.getTableMeta(request); } @Override public TBeginRemoteTxnResponse beginRemoteTxn(TBeginRemoteTxnRequest request) throws TException { return leaderImpl.beginRemoteTxn(request); } @Override public TCommitRemoteTxnResponse commitRemoteTxn(TCommitRemoteTxnRequest request) throws TException { return leaderImpl.commitRemoteTxn(request); } @Override public TAbortRemoteTxnResponse abortRemoteTxn(TAbortRemoteTxnRequest request) throws TException { return leaderImpl.abortRemoteTxn(request); } @Override public TSetConfigResponse setConfig(TSetConfigRequest request) throws TException { try { Preconditions.checkState(request.getKeys().size() == request.getValues().size()); Map<String, String> configs = new HashMap<>(); for (int i = 0; i < request.getKeys().size(); i++) { configs.put(request.getKeys().get(i), request.getValues().get(i)); } GlobalStateMgr.getCurrentState().setFrontendConfig(configs); return new TSetConfigResponse(new TStatus(TStatusCode.OK)); } catch (DdlException e) { TStatus status = new TStatus(TStatusCode.INTERNAL_ERROR); status.setError_msgs(Lists.newArrayList(e.getMessage())); return new TSetConfigResponse(status); } } }
The `hashCode` is also inefficient: it re-renders the SQL from the AST on every call.
public int hashCode() { return Objects.hashCode(this.toString()); }
return Objects.hashCode(this.toString());
public int hashCode() { return Objects.hashCode(this.sql); }
class AstKey { private final ParseNode parseNode; public AstKey(ParseNode parseNode) { this.parseNode = parseNode; } @Override public boolean equals(Object o) { if (this == o) { return true; } if (o == null || !(o instanceof AstKey)) { return false; } AstKey other = (AstKey) o; return this.toString().equals(other.toString()); } @Override public String toString() { return new AstToSQLBuilder.AST2SQLBuilderVisitor(true, false).visit(parseNode); } }
class AstKey { private final String sql; public AstKey(ParseNode parseNode) { this.sql = new AstToSQLBuilder.AST2SQLBuilderVisitor(true, false).visit(parseNode); } @Override public boolean equals(Object o) { if (this == o) { return true; } if (o == null || !(o instanceof AstKey)) { return false; } AstKey other = (AstKey) o; if (this.sql == null) { return false; } return this.sql.equals(other.sql); } @Override public String toString() { return this.sql; } }
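A possible further refinement (a minimal sketch, not part of the record above): since `AstKey` is immutable once constructed, the hash can be precomputed in the constructor as well, so repeated cache lookups pay neither the visitor cost nor the repeated string-hash cost. This assumes the `ParseNode` and `AstToSQLBuilder` types from the surrounding record.
```java
import java.util.Objects;

class AstKey {
    private final String sql;  // SQL rendered once, at construction time
    private final int hash;    // hash precomputed from the rendered SQL

    public AstKey(ParseNode parseNode) {
        this.sql = new AstToSQLBuilder.AST2SQLBuilderVisitor(true, false).visit(parseNode);
        this.hash = Objects.hashCode(this.sql); // Objects.hashCode tolerates a null sql
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (!(o instanceof AstKey)) {
            return false;
        }
        AstKey other = (AstKey) o;
        return this.sql != null && this.sql.equals(other.sql);
    }

    @Override
    public int hashCode() {
        return hash; // constant-time, no re-rendering of the AST
    }

    @Override
    public String toString() {
        return sql;
    }
}
```
Precomputing is safe here only because both fields are final and assigned exactly once in the constructor.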
I think this deserves a "final" here, and the line below too.
public static void main(String[] args) throws Exception { ServerConfiguration configuration = new ServerConfiguration(); CmdLineParser parser = new CmdLineParser(configuration); try { parser.parseArgument(args); fromConfig(configuration).run(); } catch (CmdLineException e) { LOG.error("Unable to parse command line arguments {}", Arrays.asList(args), e); throw new IllegalArgumentException("Unable to parse command line arguments.", e); } catch (Exception e) { LOG.error("Hit exception with SamzaJobServer. Exiting...", e); throw e; } }
ServerConfiguration configuration = new ServerConfiguration();
public static void main(String[] args) throws Exception { final ServerConfiguration configuration = new ServerConfiguration(); final CmdLineParser parser = new CmdLineParser(configuration); try { parser.parseArgument(args); fromConfig(configuration).run(); } catch (CmdLineException e) { LOG.error("Unable to parse command line arguments {}", Arrays.asList(args), e); throw new IllegalArgumentException("Unable to parse command line arguments.", e); } catch (Exception e) { LOG.error("Hit exception with SamzaJobServer. Exiting...", e); throw e; } }
class ServerConfiguration { @Option(name = "--job-port", usage = "The job service port. (Default: 11440)") private int jobPort = 11440; @Option(name = "--control-port", usage = "The FnControl port. (Default: 11441)") private int controlPort = 11441; }
class ServerConfiguration { @Option(name = "--job-port", usage = "The job service port. (Default: 11440)") private int jobPort = 11440; @Option(name = "--control-port", usage = "The FnControl port. (Default: 11441)") private int controlPort = 11441; }
The above TODO can be removed.
protected void createColumnAndViewDefs(Analyzer analyzer) throws AnalysisException, UserException { if (cols != null) { if (cols.size() != viewDefStmt.getColLabels().size()) { ErrorReport.reportAnalysisException(ErrorCode.ERR_VIEW_WRONG_LIST); } for (int i = 0; i < cols.size(); ++i) { Type type = viewDefStmt.getBaseTblResultExprs().get(i).getType().clone(); Column col = new Column(cols.get(i).getColName(), type); col.setComment(cols.get(i).getComment()); finalCols.add(col); } } else { for (int i = 0; i < viewDefStmt.getBaseTblResultExprs().size(); ++i) { Type type = viewDefStmt.getBaseTblResultExprs().get(i).getType().clone(); finalCols.add(new Column(viewDefStmt.getColLabels().get(i), type)); } } Set<String> colSets = Sets.newTreeSet(String.CASE_INSENSITIVE_ORDER); for (Column col : finalCols) { if (!colSets.add(col.getName())) { ErrorReport.reportAnalysisException(ErrorCode.ERR_DUP_FIELDNAME, col.getName()); } } originalViewDef = viewDefStmt.toSql(); if (cols == null) { inlineViewDef = originalViewDef; return; } Analyzer tmpAnalyzer = new Analyzer(analyzer); List<String> colNames = cols.stream().map(c -> c.getColName()).collect(Collectors.toList()); cloneStmt.substituteSelectList(tmpAnalyzer, colNames); inlineViewDef = cloneStmt.toSql(); }
Type type = viewDefStmt.getBaseTblResultExprs().get(i).getType().clone();
protected void createColumnAndViewDefs(Analyzer analyzer) throws AnalysisException, UserException { if (cols != null) { if (cols.size() != viewDefStmt.getColLabels().size()) { ErrorReport.reportAnalysisException(ErrorCode.ERR_VIEW_WRONG_LIST); } for (int i = 0; i < cols.size(); ++i) { Type type = viewDefStmt.getBaseTblResultExprs().get(i).getType().clone(); Column col = new Column(cols.get(i).getColName(), type); col.setComment(cols.get(i).getComment()); finalCols.add(col); } } else { for (int i = 0; i < viewDefStmt.getBaseTblResultExprs().size(); ++i) { Type type = viewDefStmt.getBaseTblResultExprs().get(i).getType().clone(); finalCols.add(new Column(viewDefStmt.getColLabels().get(i), type)); } } Set<String> colSets = Sets.newTreeSet(String.CASE_INSENSITIVE_ORDER); for (Column col : finalCols) { if (!colSets.add(col.getName())) { ErrorReport.reportAnalysisException(ErrorCode.ERR_DUP_FIELDNAME, col.getName()); } } originalViewDef = viewDefStmt.toSql(); if (cols == null) { inlineViewDef = originalViewDef; return; } Analyzer tmpAnalyzer = new Analyzer(analyzer); List<String> colNames = cols.stream().map(c -> c.getColName()).collect(Collectors.toList()); cloneStmt.substituteSelectList(tmpAnalyzer, colNames); inlineViewDef = cloneStmt.toSql(); }
class BaseViewStmt extends DdlStmt { private static final Logger LOG = LogManager.getLogger(BaseViewStmt.class); protected final TableName tableName; protected final List<ColWithComment> cols; protected final QueryStmt viewDefStmt; protected final List<Column> finalCols; protected String originalViewDef; protected String inlineViewDef; protected QueryStmt cloneStmt; public BaseViewStmt(TableName tableName, List<ColWithComment> cols, QueryStmt queryStmt) { Preconditions.checkNotNull(queryStmt); this.tableName = tableName; this.cols = cols; this.viewDefStmt = queryStmt; finalCols = Lists.newArrayList(); } public String getDbName() { return tableName.getDb(); } public String getTable() { return tableName.getTbl(); } public List<Column> getColumns() { return finalCols; } public String getInlineViewDef() { return inlineViewDef; } /** * Sets the originalViewDef and the expanded inlineViewDef based on viewDefStmt. * If columnNames were given, checks that they do not contain duplicate column names * and throws an exception if they do. */ @Override public void analyze(Analyzer analyzer) throws AnalysisException, UserException { super.analyze(analyzer); if (viewDefStmt.hasOutFileClause()) { throw new AnalysisException("Not support OUTFILE clause in CREATE VIEW statement"); } } }
class BaseViewStmt extends DdlStmt { private static final Logger LOG = LogManager.getLogger(BaseViewStmt.class); protected final TableName tableName; protected final List<ColWithComment> cols; protected final QueryStmt viewDefStmt; protected final List<Column> finalCols; protected String originalViewDef; protected String inlineViewDef; protected QueryStmt cloneStmt; public BaseViewStmt(TableName tableName, List<ColWithComment> cols, QueryStmt queryStmt) { Preconditions.checkNotNull(queryStmt); this.tableName = tableName; this.cols = cols; this.viewDefStmt = queryStmt; finalCols = Lists.newArrayList(); } public String getDbName() { return tableName.getDb(); } public String getTable() { return tableName.getTbl(); } public List<Column> getColumns() { return finalCols; } public String getInlineViewDef() { return inlineViewDef; } /** * Sets the originalViewDef and the expanded inlineViewDef based on viewDefStmt. * If columnNames were given, checks that they do not contain duplicate column names * and throws an exception if they do. */ @Override public void analyze(Analyzer analyzer) throws AnalysisException, UserException { super.analyze(analyzer); if (viewDefStmt.hasOutFileClause()) { throw new AnalysisException("Not support OUTFILE clause in CREATE VIEW statement"); } } }
Then maybe we substitute `Set<CoLocationGroupDesc>` with `Map<AbstractID, CoLocationGroupDesc>`? ``` final Set<CoLocationGroupDesc> coLocationGroupDescs = getVertices() .stream() .map(vertex -> CoLocationGroupDesc.from(vertex.getCoLocationGroup())) .collect(Collectors.toMap(CoLocationGroupDesc::getId, d -> d)) .values() .stream() .collect(Collectors.toSet()); ``` It could also be done in a for-loop, but this is a nit anyway, so I think it is OK as it is. Maybe at least add a comment explaining why we do it like this: `because equals ...`.
public Set<CoLocationGroupDesc> getCoLocationGroupDescriptors() { final Set<CoLocationGroup> coLocationGroups = new HashSet<>(); for (JobVertex vertex : getVertices()) { CoLocationGroup coLocationGroup = vertex.getCoLocationGroup(); if (coLocationGroup != null) { coLocationGroups.add(coLocationGroup); } } final Set<CoLocationGroupDesc> coLocationGroupDescs = coLocationGroups .stream() .map(CoLocationGroupDesc::from) .collect(Collectors.toSet()); return Collections.unmodifiableSet(coLocationGroupDescs); }
}
public Set<CoLocationGroupDesc> getCoLocationGroupDescriptors() { final Set<CoLocationGroupDesc> coLocationGroups = IterableUtils .toStream(getVertices()) .map(JobVertex::getCoLocationGroup) .filter(Objects::nonNull) .distinct() .map(CoLocationGroupDesc::from) .collect(Collectors.toSet()); return Collections.unmodifiableSet(coLocationGroups); }
class JobGraph implements Serializable { private static final long serialVersionUID = 1L; /** List of task vertices included in this job graph. */ private final Map<JobVertexID, JobVertex> taskVertices = new LinkedHashMap<JobVertexID, JobVertex>(); /** The job configuration attached to this job. */ private final Configuration jobConfiguration = new Configuration(); /** ID of this job. May be set if specific job id is desired (e.g. session management) */ private JobID jobID; /** Name of this job. */ private final String jobName; /** The mode in which the job is scheduled. */ private ScheduleMode scheduleMode = ScheduleMode.LAZY_FROM_SOURCES; /** Job specific execution config. */ private SerializedValue<ExecutionConfig> serializedExecutionConfig; /** The settings for the job checkpoints. */ private JobCheckpointingSettings snapshotSettings; /** Savepoint restore settings. */ private SavepointRestoreSettings savepointRestoreSettings = SavepointRestoreSettings.none(); /** Set of JAR files required to run this job. */ private final List<Path> userJars = new ArrayList<Path>(); /** Set of custom files required to run this job. */ private final Map<String, DistributedCache.DistributedCacheEntry> userArtifacts = new HashMap<>(); /** Set of blob keys identifying the JAR files required to run this job. */ private final List<PermanentBlobKey> userJarBlobKeys = new ArrayList<>(); /** List of classpaths required to run this job. */ private List<URL> classpaths = Collections.emptyList(); /** * Constructs a new job graph with the given name, the given {@link ExecutionConfig}, * and a random job ID. The ExecutionConfig will be serialized and can't be modified afterwards. * * @param jobName The name of the job. */ public JobGraph(String jobName) { this(null, jobName); } /** * Constructs a new job graph with the given job ID (or a random ID, if {@code null} is passed), * the given name and the given execution configuration (see {@link ExecutionConfig}). * The ExecutionConfig will be serialized and can't be modified afterwards. * * @param jobId The id of the job. A random ID is generated, if {@code null} is passed. * @param jobName The name of the job. */ public JobGraph(JobID jobId, String jobName) { this.jobID = jobId == null ? new JobID() : jobId; this.jobName = jobName == null ? "(unnamed job)" : jobName; try { setExecutionConfig(new ExecutionConfig()); } catch (IOException e) { throw new RuntimeException("bug, empty execution config is not serializable"); } } /** * Constructs a new job graph with no name, a random job ID, the given {@link ExecutionConfig}, and * the given job vertices. The ExecutionConfig will be serialized and can't be modified afterwards. * * @param vertices The vertices to add to the graph. */ public JobGraph(JobVertex... vertices) { this(null, vertices); } /** * Constructs a new job graph with the given name, the given {@link ExecutionConfig}, a random job ID, * and the given job vertices. The ExecutionConfig will be serialized and can't be modified afterwards. * * @param jobName The name of the job. * @param vertices The vertices to add to the graph. */ public JobGraph(String jobName, JobVertex... vertices) { this(null, jobName, vertices); } /** * Constructs a new job graph with the given name, the given {@link ExecutionConfig}, * the given jobId or a random one if null supplied, and the given job vertices. * The ExecutionConfig will be serialized and can't be modified afterwards. * * @param jobId The id of the job. A random ID is generated, if {@code null} is passed. 
* @param jobName The name of the job. * @param vertices The vertices to add to the graph. */ public JobGraph(JobID jobId, String jobName, JobVertex... vertices) { this(jobId, jobName); for (JobVertex vertex : vertices) { addVertex(vertex); } } /** * Returns the ID of the job. * * @return the ID of the job */ public JobID getJobID() { return this.jobID; } /** * Sets the ID of the job. */ public void setJobID(JobID jobID) { this.jobID = jobID; } /** * Returns the name assigned to the job graph. * * @return the name assigned to the job graph */ public String getName() { return this.jobName; } /** * Returns the configuration object for this job. Job-wide parameters should be set into that * configuration object. * * @return The configuration object for this job. */ public Configuration getJobConfiguration() { return this.jobConfiguration; } /** * Returns the {@link ExecutionConfig}. * * @return ExecutionConfig */ public SerializedValue<ExecutionConfig> getSerializedExecutionConfig() { return serializedExecutionConfig; } public void setScheduleMode(ScheduleMode scheduleMode) { this.scheduleMode = scheduleMode; } public ScheduleMode getScheduleMode() { return scheduleMode; } /** * Sets the savepoint restore settings. * @param settings The savepoint restore settings. */ public void setSavepointRestoreSettings(SavepointRestoreSettings settings) { this.savepointRestoreSettings = checkNotNull(settings, "Savepoint restore settings"); } /** * Returns the configured savepoint restore setting. * @return The configured savepoint restore settings. */ public SavepointRestoreSettings getSavepointRestoreSettings() { return savepointRestoreSettings; } /** * Sets the execution config. This method eagerly serialized the ExecutionConfig for future RPC * transport. Further modification of the referenced ExecutionConfig object will not affect * this serialized copy. * * @param executionConfig The ExecutionConfig to be serialized. * @throws IOException Thrown if the serialization of the ExecutionConfig fails */ public void setExecutionConfig(ExecutionConfig executionConfig) throws IOException { checkNotNull(executionConfig, "ExecutionConfig must not be null."); this.serializedExecutionConfig = new SerializedValue<>(executionConfig); } /** * Adds a new task vertex to the job graph if it is not already included. * * @param vertex * the new task vertex to be added */ public void addVertex(JobVertex vertex) { final JobVertexID id = vertex.getID(); JobVertex previous = taskVertices.put(id, vertex); if (previous != null) { taskVertices.put(id, previous); throw new IllegalArgumentException("The JobGraph already contains a vertex with that id."); } } /** * Returns an Iterable to iterate all vertices registered with the job graph. * * @return an Iterable to iterate all vertices registered with the job graph */ public Iterable<JobVertex> getVertices() { return this.taskVertices.values(); } /** * Returns an array of all job vertices that are registered with the job graph. The order in which the vertices * appear in the list is not defined. * * @return an array of all job vertices that are registered with the job graph */ public JobVertex[] getVerticesAsArray() { return this.taskVertices.values().toArray(new JobVertex[this.taskVertices.size()]); } /** * Returns the number of all vertices. * * @return The number of all vertices. 
*/ public int getNumberOfVertices() { return this.taskVertices.size(); } public Set<SlotSharingGroup> getSlotSharingGroups() { final Set<SlotSharingGroup> slotSharingGroups = new HashSet<>(); for (JobVertex vertex : getVertices()) { final SlotSharingGroup slotSharingGroup = vertex.getSlotSharingGroup(); checkNotNull(slotSharingGroup); slotSharingGroups.add(slotSharingGroup); } return Collections.unmodifiableSet(slotSharingGroups); } /** * Sets the settings for asynchronous snapshots. A value of {@code null} means that * snapshotting is not enabled. * * @param settings The snapshot settings */ public void setSnapshotSettings(JobCheckpointingSettings settings) { this.snapshotSettings = settings; } /** * Gets the settings for asynchronous snapshots. This method returns null, when * checkpointing is not enabled. * * @return The snapshot settings */ public JobCheckpointingSettings getCheckpointingSettings() { return snapshotSettings; } /** * Checks if the checkpointing was enabled for this job graph. * * @return true if checkpointing enabled */ public boolean isCheckpointingEnabled() { if (snapshotSettings == null) { return false; } long checkpointInterval = snapshotSettings.getCheckpointCoordinatorConfiguration().getCheckpointInterval(); return checkpointInterval > 0 && checkpointInterval < Long.MAX_VALUE; } /** * Searches for a vertex with a matching ID and returns it. * * @param id * the ID of the vertex to search for * @return the vertex with the matching ID or <code>null</code> if no vertex with such ID could be found */ public JobVertex findVertexByID(JobVertexID id) { return this.taskVertices.get(id); } /** * Sets the classpaths required to run the job on a task manager. * * @param paths paths of the directories/JAR files required to run the job on a task manager */ public void setClasspaths(List<URL> paths) { classpaths = paths; } public List<URL> getClasspaths() { return classpaths; } /** * Gets the maximum parallelism of all operations in this job graph. 
* * @return The maximum parallelism of this job graph */ public int getMaximumParallelism() { int maxParallelism = -1; for (JobVertex vertex : taskVertices.values()) { maxParallelism = Math.max(vertex.getParallelism(), maxParallelism); } return maxParallelism; } public List<JobVertex> getVerticesSortedTopologicallyFromSources() throws InvalidProgramException { if (this.taskVertices.isEmpty()) { return Collections.emptyList(); } List<JobVertex> sorted = new ArrayList<JobVertex>(this.taskVertices.size()); Set<JobVertex> remaining = new LinkedHashSet<JobVertex>(this.taskVertices.values()); { Iterator<JobVertex> iter = remaining.iterator(); while (iter.hasNext()) { JobVertex vertex = iter.next(); if (vertex.hasNoConnectedInputs()) { sorted.add(vertex); iter.remove(); } } } int startNodePos = 0; while (!remaining.isEmpty()) { if (startNodePos >= sorted.size()) { throw new InvalidProgramException("The job graph is cyclic."); } JobVertex current = sorted.get(startNodePos++); addNodesThatHaveNoNewPredecessors(current, sorted, remaining); } return sorted; } private void addNodesThatHaveNoNewPredecessors(JobVertex start, List<JobVertex> target, Set<JobVertex> remaining) { for (IntermediateDataSet dataSet : start.getProducedDataSets()) { for (JobEdge edge : dataSet.getConsumers()) { JobVertex v = edge.getTarget(); if (!remaining.contains(v)) { continue; } boolean hasNewPredecessors = false; for (JobEdge e : v.getInputs()) { if (e == edge) { continue; } IntermediateDataSet source = e.getSource(); if (remaining.contains(source.getProducer())) { hasNewPredecessors = true; break; } } if (!hasNewPredecessors) { target.add(v); remaining.remove(v); addNodesThatHaveNoNewPredecessors(v, target, remaining); } } } } /** * Adds the path of a JAR file required to run the job on a task manager. * * @param jar * path of the JAR file required to run the job on a task manager */ public void addJar(Path jar) { if (jar == null) { throw new IllegalArgumentException(); } if (!userJars.contains(jar)) { userJars.add(jar); } } /** * Adds the given jar files to the {@link JobGraph} via {@link JobGraph * * @param jarFilesToAttach a list of the {@link URL URLs} of the jar files to attach to the jobgraph. * @throws RuntimeException if a jar URL is not valid. */ public void addJars(final List<URL> jarFilesToAttach) { for (URL jar : jarFilesToAttach) { try { addJar(new Path(jar.toURI())); } catch (URISyntaxException e) { throw new RuntimeException("URL is invalid. This should not happen.", e); } } } /** * Gets the list of assigned user jar paths. * * @return The list of assigned user jar paths */ public List<Path> getUserJars() { return userJars; } /** * Adds the path of a custom file required to run the job on a task manager. * * @param name a name under which this artifact will be accessible through {@link DistributedCache} * @param file path of a custom file required to run the job on a task manager */ public void addUserArtifact(String name, DistributedCache.DistributedCacheEntry file) { if (file == null) { throw new IllegalArgumentException(); } userArtifacts.putIfAbsent(name, file); } /** * Gets the list of assigned user jar paths. * * @return The list of assigned user jar paths */ public Map<String, DistributedCache.DistributedCacheEntry> getUserArtifacts() { return userArtifacts; } /** * Adds the BLOB referenced by the key to the JobGraph's dependencies. 
* * @param key * path of the JAR file required to run the job on a task manager */ public void addUserJarBlobKey(PermanentBlobKey key) { if (key == null) { throw new IllegalArgumentException(); } if (!userJarBlobKeys.contains(key)) { userJarBlobKeys.add(key); } } /** * Checks whether the JobGraph has user code JAR files attached. * * @return True, if the JobGraph has user code JAR files attached, false otherwise. */ public boolean hasUsercodeJarFiles() { return this.userJars.size() > 0; } /** * Returns a set of BLOB keys referring to the JAR files required to run this job. * * @return set of BLOB keys referring to the JAR files required to run this job */ public List<PermanentBlobKey> getUserJarBlobKeys() { return this.userJarBlobKeys; } @Override public String toString() { return "JobGraph(jobId: " + jobID + ")"; } public void setUserArtifactBlobKey(String entryName, PermanentBlobKey blobKey) throws IOException { byte[] serializedBlobKey; serializedBlobKey = InstantiationUtil.serializeObject(blobKey); userArtifacts.computeIfPresent(entryName, (key, originalEntry) -> new DistributedCache.DistributedCacheEntry( originalEntry.filePath, originalEntry.isExecutable, serializedBlobKey, originalEntry.isZipped )); } public void setUserArtifactRemotePath(String entryName, String remotePath) { userArtifacts.computeIfPresent(entryName, (key, originalEntry) -> new DistributedCache.DistributedCacheEntry( remotePath, originalEntry.isExecutable, null, originalEntry.isZipped )); } public void writeUserArtifactEntriesToConfiguration() { for (Map.Entry<String, DistributedCache.DistributedCacheEntry> userArtifact : userArtifacts.entrySet()) { DistributedCache.writeFileInfoToConfig( userArtifact.getKey(), userArtifact.getValue(), jobConfiguration ); } } }
class JobGraph implements Serializable { private static final long serialVersionUID = 1L; /** List of task vertices included in this job graph. */ private final Map<JobVertexID, JobVertex> taskVertices = new LinkedHashMap<JobVertexID, JobVertex>(); /** The job configuration attached to this job. */ private final Configuration jobConfiguration = new Configuration(); /** ID of this job. May be set if specific job id is desired (e.g. session management) */ private JobID jobID; /** Name of this job. */ private final String jobName; /** The mode in which the job is scheduled. */ private ScheduleMode scheduleMode = ScheduleMode.LAZY_FROM_SOURCES; /** Job specific execution config. */ private SerializedValue<ExecutionConfig> serializedExecutionConfig; /** The settings for the job checkpoints. */ private JobCheckpointingSettings snapshotSettings; /** Savepoint restore settings. */ private SavepointRestoreSettings savepointRestoreSettings = SavepointRestoreSettings.none(); /** Set of JAR files required to run this job. */ private final List<Path> userJars = new ArrayList<Path>(); /** Set of custom files required to run this job. */ private final Map<String, DistributedCache.DistributedCacheEntry> userArtifacts = new HashMap<>(); /** Set of blob keys identifying the JAR files required to run this job. */ private final List<PermanentBlobKey> userJarBlobKeys = new ArrayList<>(); /** List of classpaths required to run this job. */ private List<URL> classpaths = Collections.emptyList(); /** * Constructs a new job graph with the given name, the given {@link ExecutionConfig}, * and a random job ID. The ExecutionConfig will be serialized and can't be modified afterwards. * * @param jobName The name of the job. */ public JobGraph(String jobName) { this(null, jobName); } /** * Constructs a new job graph with the given job ID (or a random ID, if {@code null} is passed), * the given name and the given execution configuration (see {@link ExecutionConfig}). * The ExecutionConfig will be serialized and can't be modified afterwards. * * @param jobId The id of the job. A random ID is generated, if {@code null} is passed. * @param jobName The name of the job. */ public JobGraph(JobID jobId, String jobName) { this.jobID = jobId == null ? new JobID() : jobId; this.jobName = jobName == null ? "(unnamed job)" : jobName; try { setExecutionConfig(new ExecutionConfig()); } catch (IOException e) { throw new RuntimeException("bug, empty execution config is not serializable"); } } /** * Constructs a new job graph with no name, a random job ID, the given {@link ExecutionConfig}, and * the given job vertices. The ExecutionConfig will be serialized and can't be modified afterwards. * * @param vertices The vertices to add to the graph. */ public JobGraph(JobVertex... vertices) { this(null, vertices); } /** * Constructs a new job graph with the given name, the given {@link ExecutionConfig}, a random job ID, * and the given job vertices. The ExecutionConfig will be serialized and can't be modified afterwards. * * @param jobName The name of the job. * @param vertices The vertices to add to the graph. */ public JobGraph(String jobName, JobVertex... vertices) { this(null, jobName, vertices); } /** * Constructs a new job graph with the given name, the given {@link ExecutionConfig}, * the given jobId or a random one if null supplied, and the given job vertices. * The ExecutionConfig will be serialized and can't be modified afterwards. * * @param jobId The id of the job. A random ID is generated, if {@code null} is passed. 
* @param jobName The name of the job. * @param vertices The vertices to add to the graph. */ public JobGraph(JobID jobId, String jobName, JobVertex... vertices) { this(jobId, jobName); for (JobVertex vertex : vertices) { addVertex(vertex); } } /** * Returns the ID of the job. * * @return the ID of the job */ public JobID getJobID() { return this.jobID; } /** * Sets the ID of the job. */ public void setJobID(JobID jobID) { this.jobID = jobID; } /** * Returns the name assigned to the job graph. * * @return the name assigned to the job graph */ public String getName() { return this.jobName; } /** * Returns the configuration object for this job. Job-wide parameters should be set into that * configuration object. * * @return The configuration object for this job. */ public Configuration getJobConfiguration() { return this.jobConfiguration; } /** * Returns the {@link ExecutionConfig}. * * @return ExecutionConfig */ public SerializedValue<ExecutionConfig> getSerializedExecutionConfig() { return serializedExecutionConfig; } public void setScheduleMode(ScheduleMode scheduleMode) { this.scheduleMode = scheduleMode; } public ScheduleMode getScheduleMode() { return scheduleMode; } /** * Sets the savepoint restore settings. * @param settings The savepoint restore settings. */ public void setSavepointRestoreSettings(SavepointRestoreSettings settings) { this.savepointRestoreSettings = checkNotNull(settings, "Savepoint restore settings"); } /** * Returns the configured savepoint restore setting. * @return The configured savepoint restore settings. */ public SavepointRestoreSettings getSavepointRestoreSettings() { return savepointRestoreSettings; } /** * Sets the execution config. This method eagerly serialized the ExecutionConfig for future RPC * transport. Further modification of the referenced ExecutionConfig object will not affect * this serialized copy. * * @param executionConfig The ExecutionConfig to be serialized. * @throws IOException Thrown if the serialization of the ExecutionConfig fails */ public void setExecutionConfig(ExecutionConfig executionConfig) throws IOException { checkNotNull(executionConfig, "ExecutionConfig must not be null."); this.serializedExecutionConfig = new SerializedValue<>(executionConfig); } /** * Adds a new task vertex to the job graph if it is not already included. * * @param vertex * the new task vertex to be added */ public void addVertex(JobVertex vertex) { final JobVertexID id = vertex.getID(); JobVertex previous = taskVertices.put(id, vertex); if (previous != null) { taskVertices.put(id, previous); throw new IllegalArgumentException("The JobGraph already contains a vertex with that id."); } } /** * Returns an Iterable to iterate all vertices registered with the job graph. * * @return an Iterable to iterate all vertices registered with the job graph */ public Iterable<JobVertex> getVertices() { return this.taskVertices.values(); } /** * Returns an array of all job vertices that are registered with the job graph. The order in which the vertices * appear in the list is not defined. * * @return an array of all job vertices that are registered with the job graph */ public JobVertex[] getVerticesAsArray() { return this.taskVertices.values().toArray(new JobVertex[this.taskVertices.size()]); } /** * Returns the number of all vertices. * * @return The number of all vertices. 
*/ public int getNumberOfVertices() { return this.taskVertices.size(); } public Set<SlotSharingGroup> getSlotSharingGroups() { final Set<SlotSharingGroup> slotSharingGroups = IterableUtils .toStream(getVertices()) .map(JobVertex::getSlotSharingGroup) .collect(Collectors.toSet()); return Collections.unmodifiableSet(slotSharingGroups); } /** * Sets the settings for asynchronous snapshots. A value of {@code null} means that * snapshotting is not enabled. * * @param settings The snapshot settings */ public void setSnapshotSettings(JobCheckpointingSettings settings) { this.snapshotSettings = settings; } /** * Gets the settings for asynchronous snapshots. This method returns null, when * checkpointing is not enabled. * * @return The snapshot settings */ public JobCheckpointingSettings getCheckpointingSettings() { return snapshotSettings; } /** * Checks if the checkpointing was enabled for this job graph. * * @return true if checkpointing enabled */ public boolean isCheckpointingEnabled() { if (snapshotSettings == null) { return false; } long checkpointInterval = snapshotSettings.getCheckpointCoordinatorConfiguration().getCheckpointInterval(); return checkpointInterval > 0 && checkpointInterval < Long.MAX_VALUE; } /** * Searches for a vertex with a matching ID and returns it. * * @param id * the ID of the vertex to search for * @return the vertex with the matching ID or <code>null</code> if no vertex with such ID could be found */ public JobVertex findVertexByID(JobVertexID id) { return this.taskVertices.get(id); } /** * Sets the classpaths required to run the job on a task manager. * * @param paths paths of the directories/JAR files required to run the job on a task manager */ public void setClasspaths(List<URL> paths) { classpaths = paths; } public List<URL> getClasspaths() { return classpaths; } /** * Gets the maximum parallelism of all operations in this job graph. 
* * @return The maximum parallelism of this job graph */ public int getMaximumParallelism() { int maxParallelism = -1; for (JobVertex vertex : taskVertices.values()) { maxParallelism = Math.max(vertex.getParallelism(), maxParallelism); } return maxParallelism; } public List<JobVertex> getVerticesSortedTopologicallyFromSources() throws InvalidProgramException { if (this.taskVertices.isEmpty()) { return Collections.emptyList(); } List<JobVertex> sorted = new ArrayList<JobVertex>(this.taskVertices.size()); Set<JobVertex> remaining = new LinkedHashSet<JobVertex>(this.taskVertices.values()); { Iterator<JobVertex> iter = remaining.iterator(); while (iter.hasNext()) { JobVertex vertex = iter.next(); if (vertex.hasNoConnectedInputs()) { sorted.add(vertex); iter.remove(); } } } int startNodePos = 0; while (!remaining.isEmpty()) { if (startNodePos >= sorted.size()) { throw new InvalidProgramException("The job graph is cyclic."); } JobVertex current = sorted.get(startNodePos++); addNodesThatHaveNoNewPredecessors(current, sorted, remaining); } return sorted; } private void addNodesThatHaveNoNewPredecessors(JobVertex start, List<JobVertex> target, Set<JobVertex> remaining) { for (IntermediateDataSet dataSet : start.getProducedDataSets()) { for (JobEdge edge : dataSet.getConsumers()) { JobVertex v = edge.getTarget(); if (!remaining.contains(v)) { continue; } boolean hasNewPredecessors = false; for (JobEdge e : v.getInputs()) { if (e == edge) { continue; } IntermediateDataSet source = e.getSource(); if (remaining.contains(source.getProducer())) { hasNewPredecessors = true; break; } } if (!hasNewPredecessors) { target.add(v); remaining.remove(v); addNodesThatHaveNoNewPredecessors(v, target, remaining); } } } } /** * Adds the path of a JAR file required to run the job on a task manager. * * @param jar * path of the JAR file required to run the job on a task manager */ public void addJar(Path jar) { if (jar == null) { throw new IllegalArgumentException(); } if (!userJars.contains(jar)) { userJars.add(jar); } } /** * Adds the given jar files to the {@link JobGraph} via {@link JobGraph * * @param jarFilesToAttach a list of the {@link URL URLs} of the jar files to attach to the jobgraph. * @throws RuntimeException if a jar URL is not valid. */ public void addJars(final List<URL> jarFilesToAttach) { for (URL jar : jarFilesToAttach) { try { addJar(new Path(jar.toURI())); } catch (URISyntaxException e) { throw new RuntimeException("URL is invalid. This should not happen.", e); } } } /** * Gets the list of assigned user jar paths. * * @return The list of assigned user jar paths */ public List<Path> getUserJars() { return userJars; } /** * Adds the path of a custom file required to run the job on a task manager. * * @param name a name under which this artifact will be accessible through {@link DistributedCache} * @param file path of a custom file required to run the job on a task manager */ public void addUserArtifact(String name, DistributedCache.DistributedCacheEntry file) { if (file == null) { throw new IllegalArgumentException(); } userArtifacts.putIfAbsent(name, file); } /** * Gets the list of assigned user jar paths. * * @return The list of assigned user jar paths */ public Map<String, DistributedCache.DistributedCacheEntry> getUserArtifacts() { return userArtifacts; } /** * Adds the BLOB referenced by the key to the JobGraph's dependencies. 
* * @param key * path of the JAR file required to run the job on a task manager */ public void addUserJarBlobKey(PermanentBlobKey key) { if (key == null) { throw new IllegalArgumentException(); } if (!userJarBlobKeys.contains(key)) { userJarBlobKeys.add(key); } } /** * Checks whether the JobGraph has user code JAR files attached. * * @return True, if the JobGraph has user code JAR files attached, false otherwise. */ public boolean hasUsercodeJarFiles() { return this.userJars.size() > 0; } /** * Returns a set of BLOB keys referring to the JAR files required to run this job. * * @return set of BLOB keys referring to the JAR files required to run this job */ public List<PermanentBlobKey> getUserJarBlobKeys() { return this.userJarBlobKeys; } @Override public String toString() { return "JobGraph(jobId: " + jobID + ")"; } public void setUserArtifactBlobKey(String entryName, PermanentBlobKey blobKey) throws IOException { byte[] serializedBlobKey; serializedBlobKey = InstantiationUtil.serializeObject(blobKey); userArtifacts.computeIfPresent(entryName, (key, originalEntry) -> new DistributedCache.DistributedCacheEntry( originalEntry.filePath, originalEntry.isExecutable, serializedBlobKey, originalEntry.isZipped )); } public void setUserArtifactRemotePath(String entryName, String remotePath) { userArtifacts.computeIfPresent(entryName, (key, originalEntry) -> new DistributedCache.DistributedCacheEntry( remotePath, originalEntry.isExecutable, null, originalEntry.isZipped )); } public void writeUserArtifactEntriesToConfiguration() { for (Map.Entry<String, DistributedCache.DistributedCacheEntry> userArtifact : userArtifacts.entrySet()) { DistributedCache.writeFileInfoToConfig( userArtifact.getKey(), userArtifact.getValue(), jobConfiguration ); } } }
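For reference, here is a hedged sketch of the reviewer's Map-based alternative, assuming the Flink types used in this record (`CoLocationGroupDesc.getId()` returning an `AbstractID`, `IterableUtils`, `JobVertex`). Note that the inline snippet in the comment would need an explicit merge function: `Collectors.toMap` without one throws `IllegalStateException` on a key collision, and duplicate ids are precisely what is being collapsed here.
```java
import java.util.Collections;
import java.util.HashSet;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.function.Function;
import java.util.stream.Collectors;

public Set<CoLocationGroupDesc> getCoLocationGroupDescriptors() {
    // Dedup by group id instead of relying on CoLocationGroupDesc equality,
    // which is the reviewer's "because equals ..." concern.
    final Map<AbstractID, CoLocationGroupDesc> byId = IterableUtils
            .toStream(getVertices())
            .map(JobVertex::getCoLocationGroup)
            .filter(Objects::nonNull)
            .map(CoLocationGroupDesc::from)
            .collect(Collectors.toMap(
                    CoLocationGroupDesc::getId,
                    Function.identity(),
                    (first, second) -> first)); // keep one desc per group id
    return Collections.unmodifiableSet(new HashSet<>(byId.values()));
}
```
The merged code sidesteps the problem differently: it calls `.distinct()` on the `CoLocationGroup` objects before mapping, so deduplication happens on the groups themselves rather than on the descriptors.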
Oh, I see! You cache the `Uni` and resubscribe to it every time (well, Quarkus does). That's actually interesting, but I'm not sure it's what the user would expect. Imagine: ```java @GET public Uni<String> callMyRemoteService() { return webClient.send().map(r -> r.bodyAsString()); } ``` Basically, it calls a remote service. If you cache the result, what would/should happen? 1. the response is cached - the users will get the same response, avoiding calls to the remote service 2. the `Uni` is cached - so every time there is a request, there is another subscription calling the remote service I would have said 1, but it looks like 2 has been implemented. Can you confirm?
public Uni<String> cachedMethod(String key) { invocations++; return Uni.createFrom().item(() -> new String()); }
return Uni.createFrom().item(() -> new String());
public Uni<String> cachedMethod(String key) { invocations++; return Uni.createFrom().item(() -> { subscriptions++; return "" + subscriptions; }); }
class CachedService { private int invocations; @CacheResult(cacheName = "test-cache") public int getInvocations() { return invocations; } }
class CachedService { private int invocations; private int subscriptions; @CacheResult(cacheName = "test-cache") public int getInvocations() { return invocations; } }
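The comment above asks whether quarkus-cache stores the response or the `Uni` itself. A minimal plain-Java sketch with Mutiny (outside Quarkus; the counter names are illustrative) shows the difference: resubscribing a cached `Uni` re-runs the work, while `memoize()` caches the emitted item.

```java
import io.smallrye.mutiny.Uni;
import java.util.concurrent.atomic.AtomicInteger;

public class UniCachingSketch {
    public static void main(String[] args) {
        // Behaviour (2): the Uni is cached, but each subscription
        // re-runs the supplier - i.e. the remote service is called again.
        AtomicInteger rawCalls = new AtomicInteger();
        Uni<String> raw = Uni.createFrom()
                .item(() -> "call #" + rawCalls.incrementAndGet());
        System.out.println(raw.await().indefinitely()); // call #1
        System.out.println(raw.await().indefinitely()); // call #2

        // Behaviour (1): memoize() caches the emitted item, so later
        // subscribers get the first response without re-calling the service.
        AtomicInteger memoCalls = new AtomicInteger();
        Uni<String> memoized = Uni.createFrom()
                .item(() -> "call #" + memoCalls.incrementAndGet())
                .memoize().indefinitely();
        System.out.println(memoized.await().indefinitely()); // call #1
        System.out.println(memoized.await().indefinitely()); // still call #1
    }
}
```

In other words, behaviour (2) corresponds to caching the raw `Uni`, and behaviour (1) would require something like `memoize()` so that later subscribers see the first result - which is what the `subscriptions` counter in the test above is probing for.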
It would be great if you could abstract the query scheduling strategy the way `Presto` does and refactor this class. (A sketch of such an abstraction follows this record.)
private void computeFragmentExecParams() throws Exception { computeFragmentHosts(); instanceIds.clear(); for (FragmentExecParams params : fragmentExecParamsMap.values()) { if (LOG.isDebugEnabled()) { LOG.debug("fragment {} has instances {}", params.fragment.getFragmentId(), params.instanceExecParams.size()); } for (int j = 0; j < params.instanceExecParams.size(); ++j) { TUniqueId instanceId = new TUniqueId(); instanceId.setHi(queryId.hi); instanceId.setLo(queryId.lo + instanceIds.size() + 1); params.instanceExecParams.get(j).instanceId = instanceId; instanceIds.add(instanceId); } } for (FragmentExecParams params : fragmentExecParamsMap.values()) { PlanFragment destFragment = params.fragment.getDestFragment(); if (destFragment == null) { continue; } FragmentExecParams destParams = fragmentExecParamsMap.get(destFragment.getFragmentId()); DataSink sink = params.fragment.getSink(); PlanNodeId exchId = sink.getExchNodeId(); if (destParams.perExchNumSenders.get(exchId.asInt()) == null) { destParams.perExchNumSenders.put(exchId.asInt(), params.instanceExecParams.size()); } else { destParams.perExchNumSenders.put(exchId.asInt(), params.instanceExecParams.size() + destParams.perExchNumSenders.get(exchId.asInt())); } if (bucketShuffleJoinController.isBucketShuffleJoin(destFragment.getFragmentId().asInt())) { int bucketSeq = 0; int bucketNum = bucketShuffleJoinController.getFragmentBucketNum(destFragment.getFragmentId()); TNetworkAddress dummyServer = new TNetworkAddress("0.0.0.0", 0); while (bucketSeq < bucketNum) { TPlanFragmentDestination dest = new TPlanFragmentDestination(); dest.fragment_instance_id = new TUniqueId(-1, -1); dest.server = dummyServer; dest.setBrpcServer(dummyServer); for (FInstanceExecParam instanceExecParams : destParams.instanceExecParams) { if (instanceExecParams.bucketSeqSet.contains(bucketSeq)) { dest.fragment_instance_id = instanceExecParams.instanceId; dest.server = toRpcHost(instanceExecParams.host); dest.setBrpcServer(toBrpcHost(instanceExecParams.host)); break; } } bucketSeq++; params.destinations.add(dest); } } else { for (int j = 0; j < destParams.instanceExecParams.size(); ++j) { TPlanFragmentDestination dest = new TPlanFragmentDestination(); dest.fragment_instance_id = destParams.instanceExecParams.get(j).instanceId; dest.server = toRpcHost(destParams.instanceExecParams.get(j).host); dest.setBrpcServer(toBrpcHost(destParams.instanceExecParams.get(j).host)); params.destinations.add(dest); } } } }
if (bucketShuffleJoinController.isBucketShuffleJoin(destFragment.getFragmentId().asInt())) {
private void computeFragmentExecParams() throws Exception { computeFragmentHosts(); instanceIds.clear(); for (FragmentExecParams params : fragmentExecParamsMap.values()) { if (LOG.isDebugEnabled()) { LOG.debug("fragment {} has instances {}", params.fragment.getFragmentId(), params.instanceExecParams.size()); } for (int j = 0; j < params.instanceExecParams.size(); ++j) { TUniqueId instanceId = new TUniqueId(); instanceId.setHi(queryId.hi); instanceId.setLo(queryId.lo + instanceIds.size() + 1); params.instanceExecParams.get(j).instanceId = instanceId; instanceIds.add(instanceId); } } for (FragmentExecParams params : fragmentExecParamsMap.values()) { PlanFragment destFragment = params.fragment.getDestFragment(); if (destFragment == null) { continue; } FragmentExecParams destParams = fragmentExecParamsMap.get(destFragment.getFragmentId()); DataSink sink = params.fragment.getSink(); PlanNodeId exchId = sink.getExchNodeId(); if (destParams.perExchNumSenders.get(exchId.asInt()) == null) { destParams.perExchNumSenders.put(exchId.asInt(), params.instanceExecParams.size()); } else { destParams.perExchNumSenders.put(exchId.asInt(), params.instanceExecParams.size() + destParams.perExchNumSenders.get(exchId.asInt())); } if (bucketShuffleJoinController.isBucketShuffleJoin(destFragment.getFragmentId().asInt())) { int bucketSeq = 0; int bucketNum = bucketShuffleJoinController.getFragmentBucketNum(destFragment.getFragmentId()); TNetworkAddress dummyServer = new TNetworkAddress("0.0.0.0", 0); while (bucketSeq < bucketNum) { TPlanFragmentDestination dest = new TPlanFragmentDestination(); dest.fragment_instance_id = new TUniqueId(-1, -1); dest.server = dummyServer; dest.setBrpcServer(dummyServer); for (FInstanceExecParam instanceExecParams : destParams.instanceExecParams) { if (instanceExecParams.bucketSeqSet.contains(bucketSeq)) { dest.fragment_instance_id = instanceExecParams.instanceId; dest.server = toRpcHost(instanceExecParams.host); dest.setBrpcServer(toBrpcHost(instanceExecParams.host)); break; } } bucketSeq++; params.destinations.add(dest); } } else { for (int j = 0; j < destParams.instanceExecParams.size(); ++j) { TPlanFragmentDestination dest = new TPlanFragmentDestination(); dest.fragment_instance_id = destParams.instanceExecParams.get(j).instanceId; dest.server = toRpcHost(destParams.instanceExecParams.get(j).host); dest.setBrpcServer(toBrpcHost(destParams.instanceExecParams.get(j).host)); params.destinations.add(dest); } } } }
class Coordinator { private static final Logger LOG = LogManager.getLogger(Coordinator.class); private static final DateFormat DATE_FORMAT = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); private static String localIP = FrontendOptions.getLocalHostAddress(); private static Random instanceRandom = new Random(); Status queryStatus = new Status(); Map<TNetworkAddress, Long> addressToBackendID = Maps.newHashMap(); private ImmutableMap<Long, Backend> idToBackend = ImmutableMap.of(); private TDescriptorTable descTable; private TQueryGlobals queryGlobals = new TQueryGlobals(); private TQueryOptions queryOptions; private TNetworkAddress coordAddress; private Lock lock = new ReentrantLock(); private boolean returnedAllResults; private RuntimeProfile queryProfile; private List<RuntimeProfile> fragmentProfile; private Map<PlanFragmentId, FragmentExecParams> fragmentExecParamsMap = Maps.newHashMap(); private List<PlanFragment> fragments; private List<BackendExecState> backendExecStates = Lists.newArrayList(); private List<BackendExecState> needCheckBackendExecStates = Lists.newArrayList(); private ResultReceiver receiver; private List<ScanNode> scanNodes; private Set<TUniqueId> instanceIds = Sets.newHashSet(); private MarkedCountDownLatch<TUniqueId, Long> profileDoneSignal; private boolean isBlockQuery; private int numReceivedRows = 0; private List<String> deltaUrls; private Map<String, String> loadCounters; private String trackingUrl; private List<String> exportFiles; private List<TTabletCommitInfo> commitInfos = Lists.newArrayList(); private long jobId = -1; private TUniqueId queryId; private TResourceInfo tResourceInfo; private boolean needReport; private String clusterName; private final TUniqueId nextInstanceId; public Coordinator(ConnectContext context, Analyzer analyzer, Planner planner) { this.isBlockQuery = planner.isBlockQuery(); this.queryId = context.queryId(); this.fragments = planner.getFragments(); this.scanNodes = planner.getScanNodes(); this.descTable = analyzer.getDescTbl().toThrift(); this.returnedAllResults = false; this.queryOptions = context.getSessionVariable().toThrift(); this.queryGlobals.setNowString(DATE_FORMAT.format(new Date())); this.queryGlobals.setTimestampMs(new Date().getTime()); if (context.getSessionVariable().getTimeZone().equals("CST")) { this.queryGlobals.setTimeZone(TimeUtils.DEFAULT_TIME_ZONE); } else { this.queryGlobals.setTimeZone(context.getSessionVariable().getTimeZone()); } this.tResourceInfo = new TResourceInfo(context.getQualifiedUser(), context.getSessionVariable().getResourceGroup()); this.needReport = context.getSessionVariable().isReportSucc(); this.clusterName = context.getClusterName(); this.nextInstanceId = new TUniqueId(); nextInstanceId.setHi(queryId.hi); nextInstanceId.setLo(queryId.lo + 1); } public Coordinator(Long jobId, TUniqueId queryId, DescriptorTable descTable, List<PlanFragment> fragments, List<ScanNode> scanNodes, String cluster, String timezone) { this.isBlockQuery = true; this.jobId = jobId; this.queryId = queryId; this.descTable = descTable.toThrift(); this.fragments = fragments; this.scanNodes = scanNodes; this.queryOptions = new TQueryOptions(); this.queryGlobals.setNowString(DATE_FORMAT.format(new Date())); this.queryGlobals.setTimestampMs(new Date().getTime()); this.queryGlobals.setTimeZone(timezone); this.tResourceInfo = new TResourceInfo("", ""); this.needReport = true; this.clusterName = cluster; this.nextInstanceId = new TUniqueId(); nextInstanceId.setHi(queryId.hi); nextInstanceId.setLo(queryId.lo + 1); } public long 
getJobId() { return jobId; } public TUniqueId getQueryId() { return queryId; } public void setQueryId(TUniqueId queryId) { this.queryId = queryId; } public void setQueryType(TQueryType type) { this.queryOptions.setQueryType(type); } public Status getExecStatus() { return queryStatus; } public RuntimeProfile getQueryProfile() { return queryProfile; } public List<String> getDeltaUrls() { return deltaUrls; } public Map<String, String> getLoadCounters() { return loadCounters; } public String getTrackingUrl() { return trackingUrl; } public void setExecMemoryLimit(long execMemoryLimit) { this.queryOptions.setMemLimit(execMemoryLimit); } public void setLoadMemLimit(long loadMemLimit) { this.queryOptions.setLoadMemLimit(loadMemLimit); } public void setTimeout(int timeout) { this.queryOptions.setQueryTimeout(timeout); } public void clearExportStatus() { lock.lock(); try { this.backendExecStates.clear(); this.queryStatus.setStatus(new Status()); if (this.exportFiles == null) { this.exportFiles = Lists.newArrayList(); } this.exportFiles.clear(); this.needCheckBackendExecStates.clear(); } finally { lock.unlock(); } } public List<TTabletCommitInfo> getCommitInfos() { return commitInfos; } private void prepare() { for (PlanFragment fragment : fragments) { fragmentExecParamsMap.put(fragment.getFragmentId(), new FragmentExecParams(fragment)); } for (PlanFragment fragment : fragments) { if (!(fragment.getSink() instanceof DataStreamSink)) { continue; } FragmentExecParams params = fragmentExecParamsMap.get(fragment.getDestFragment().getFragmentId()); params.inputFragments.add(fragment.getFragmentId()); } coordAddress = new TNetworkAddress(localIP, Config.rpc_port); int fragmentSize = fragments.size(); queryProfile = new RuntimeProfile("Execution Profile " + DebugUtil.printId(queryId)); fragmentProfile = new ArrayList<RuntimeProfile>(); for (int i = 0; i < fragmentSize; i ++) { fragmentProfile.add(new RuntimeProfile("Fragment " + i)); queryProfile.addChild(fragmentProfile.get(i)); } this.idToBackend = Catalog.getCurrentSystemInfo().getIdToBackend(); if (LOG.isDebugEnabled()) { LOG.debug("idToBackend size={}", idToBackend.size()); for (Map.Entry<Long, Backend> entry : idToBackend.entrySet()) { Long backendID = entry.getKey(); Backend backend = entry.getValue(); LOG.debug("backend: {}-{}-{}", backendID, backend.getHost(), backend.getBePort()); } } } private void lock() { lock.lock(); } private void unlock() { lock.unlock(); } private void traceInstance() { if (LOG.isDebugEnabled()) { StringBuilder sb = new StringBuilder(); int idx = 0; sb.append("query id=").append(DebugUtil.printId(queryId)).append(","); sb.append("fragment=["); for (Map.Entry<PlanFragmentId, FragmentExecParams> entry : fragmentExecParamsMap.entrySet()) { if (idx++ != 0) { sb.append(","); } sb.append(entry.getKey()); entry.getValue().appendTo(sb); } sb.append("]"); LOG.debug(sb.toString()); } } public void exec() throws Exception { if (LOG.isDebugEnabled() && !scanNodes.isEmpty()) { LOG.debug("debug: in Coordinator::exec. query id: {}, planNode: {}", DebugUtil.printId(queryId), scanNodes.get(0).treeToThrift()); } if (LOG.isDebugEnabled() && !fragments.isEmpty()) { LOG.debug("debug: in Coordinator::exec. 
query id: {}, fragment: {}", DebugUtil.printId(queryId), fragments.get(0).toThrift()); } prepare(); computeScanRangeAssignment(); computeFragmentExecParams(); traceInstance(); PlanFragmentId topId = fragments.get(0).getFragmentId(); FragmentExecParams topParams = fragmentExecParamsMap.get(topId); if (topParams.fragment.getSink() instanceof ResultSink) { TNetworkAddress execBeAddr = topParams.instanceExecParams.get(0).host; receiver = new ResultReceiver( topParams.instanceExecParams.get(0).instanceId, addressToBackendID.get(execBeAddr), toBrpcHost(execBeAddr), queryOptions.query_timeout * 1000); LOG.info("dispatch query job: {} to {}", DebugUtil.printId(queryId), topParams.instanceExecParams.get(0).host); ResultSink resultSink = (ResultSink) topParams.fragment.getSink(); if (resultSink.isOutputFileSink() && resultSink.needBroker()) { FsBroker broker = Catalog.getCurrentCatalog().getBrokerMgr().getBroker(resultSink.getBrokerName(), execBeAddr.getHostname()); resultSink.setBrokerAddr(broker.ip, broker.port); LOG.info("OUTFILE through broker: {}:{}", broker.ip, broker.port); } } else { this.queryOptions.setIsReportSuccess(true); deltaUrls = Lists.newArrayList(); loadCounters = Maps.newHashMap(); List<Long> relatedBackendIds = Lists.newArrayList(addressToBackendID.values()); Catalog.getCurrentCatalog().getLoadManager().initJobProgress(jobId, queryId, instanceIds, relatedBackendIds); LOG.info("dispatch load job: {} to {}", DebugUtil.printId(queryId), addressToBackendID.keySet()); } profileDoneSignal = new MarkedCountDownLatch<TUniqueId, Long>(instanceIds.size()); for (TUniqueId instanceId : instanceIds) { profileDoneSignal.addMark(instanceId, -1L /* value is meaningless */); } lock(); try { int backendId = 0; int profileFragmentId = 0; long memoryLimit = queryOptions.getMemLimit(); for (PlanFragment fragment : fragments) { FragmentExecParams params = fragmentExecParamsMap.get(fragment.getFragmentId()); int instanceNum = params.instanceExecParams.size(); Preconditions.checkState(instanceNum > 0); List<TExecPlanFragmentParams> tParams = params.toThrift(backendId); List<Pair<BackendExecState, Future<PExecPlanFragmentResult>>> futures = Lists.newArrayList(); if (colocateFragmentIds.contains(fragment.getFragmentId().asInt())) { int rate = Math.min(Config.query_colocate_join_memory_limit_penalty_factor, instanceNum); long newmemory = memoryLimit / rate; for (TExecPlanFragmentParams tParam : tParams) { tParam.query_options.setMemLimit(newmemory); } } boolean needCheckBackendState = false; if (queryOptions.getQueryType() == TQueryType.LOAD && profileFragmentId == 0) { needCheckBackendState = true; } int instanceId = 0; for (TExecPlanFragmentParams tParam : tParams) { BackendExecState execState = new BackendExecState(fragment.getFragmentId(), instanceId++, profileFragmentId, tParam, this.addressToBackendID); backendExecStates.add(execState); if (needCheckBackendState) { needCheckBackendExecStates.add(execState); if (LOG.isDebugEnabled()) { LOG.debug("add need check backend {} for fragment, {} job: {}", execState.backend.getId(), fragment.getFragmentId().asInt(), jobId); } } futures.add(Pair.create(execState, execState.execRemoteFragmentAsync())); backendId++; } for (Pair<BackendExecState, Future<PExecPlanFragmentResult>> pair : futures) { TStatusCode code = TStatusCode.INTERNAL_ERROR; String errMsg = null; try { PExecPlanFragmentResult result = pair.second.get(Config.remote_fragment_exec_timeout_ms, TimeUnit.MILLISECONDS); code = TStatusCode.findByValue(result.status.status_code); if 
(result.status.error_msgs != null && !result.status.error_msgs.isEmpty()) { errMsg = result.status.error_msgs.get(0); } } catch (ExecutionException e) { LOG.warn("catch a execute exception", e); code = TStatusCode.THRIFT_RPC_ERROR; } catch (InterruptedException e) { LOG.warn("catch a interrupt exception", e); code = TStatusCode.INTERNAL_ERROR; } catch (TimeoutException e) { LOG.warn("catch a timeout exception", e); code = TStatusCode.TIMEOUT; } if (code != TStatusCode.OK) { if (errMsg == null) { errMsg = "exec rpc error. backend id: " + pair.first.backend.getId(); } queryStatus.setStatus(errMsg); LOG.warn("exec plan fragment failed, errmsg={}, code: {}, fragmentId={}, backend={}:{}", errMsg, code, fragment.getFragmentId(), pair.first.address.hostname, pair.first.address.port); cancelInternal(PPlanFragmentCancelReason.INTERNAL_ERROR); switch (code) { case TIMEOUT: throw new UserException("query timeout. backend id: " + pair.first.backend.getId()); case THRIFT_RPC_ERROR: SimpleScheduler.addToBlacklist(pair.first.backend.getId()); throw new RpcException(pair.first.backend.getHost(), "rpc failed"); default: throw new UserException(errMsg); } } } profileFragmentId += 1; } attachInstanceProfileToFragmentProfile(); } finally { unlock(); } } public List<String> getExportFiles() { return exportFiles; } void updateExportFiles(List<String> files) { lock.lock(); try { if (exportFiles == null) { exportFiles = Lists.newArrayList(); } exportFiles.addAll(files); } finally { lock.unlock(); } } void updateDeltas(List<String> urls) { lock.lock(); try { deltaUrls.addAll(urls); } finally { lock.unlock(); } } private void updateLoadCounters(Map<String, String> newLoadCounters) { lock.lock(); try { long numRowsNormal = 0L; String value = this.loadCounters.get(LoadEtlTask.DPP_NORMAL_ALL); if (value != null) { numRowsNormal = Long.valueOf(value); } long numRowsAbnormal = 0L; value = this.loadCounters.get(LoadEtlTask.DPP_ABNORMAL_ALL); if (value != null) { numRowsAbnormal = Long.valueOf(value); } long numRowsUnselected = 0L; value = this.loadCounters.get(LoadJob.UNSELECTED_ROWS); if (value != null) { numRowsUnselected = Long.valueOf(value); } value = newLoadCounters.get(LoadEtlTask.DPP_NORMAL_ALL); if (value != null) { numRowsNormal += Long.valueOf(value); } value = newLoadCounters.get(LoadEtlTask.DPP_ABNORMAL_ALL); if (value != null) { numRowsAbnormal += Long.valueOf(value); } value = newLoadCounters.get(LoadJob.UNSELECTED_ROWS); if (value != null) { numRowsUnselected += Long.valueOf(value); } this.loadCounters.put(LoadEtlTask.DPP_NORMAL_ALL, "" + numRowsNormal); this.loadCounters.put(LoadEtlTask.DPP_ABNORMAL_ALL, "" + numRowsAbnormal); this.loadCounters.put(LoadJob.UNSELECTED_ROWS, "" + numRowsUnselected); } finally { lock.unlock(); } } private void updateCommitInfos(List<TTabletCommitInfo> commitInfos) { lock.lock(); try { this.commitInfos.addAll(commitInfos); } finally { lock.unlock(); } } private void updateStatus(Status status, TUniqueId instanceId) { lock.lock(); try { if (returnedAllResults && status.isCancelled()) { return; } if (status.ok()) { return; } if (!queryStatus.ok()) { return; } queryStatus.setStatus(status); LOG.warn("one instance report fail throw updateStatus(), need cancel. job id: {}, query id: {}, instance id: {}", jobId, DebugUtil.printId(queryId), instanceId != null ? 
DebugUtil.printId(instanceId) : "NaN"); cancelInternal(PPlanFragmentCancelReason.INTERNAL_ERROR); } finally { lock.unlock(); } } public RowBatch getNext() throws Exception { if (receiver == null) { throw new UserException("There is no receiver."); } RowBatch resultBatch; Status status = new Status(); resultBatch = receiver.getNext(status); if (!status.ok()) { LOG.warn("get next fail, need cancel. query id: {}", DebugUtil.printId(queryId)); } updateStatus(status, null /* no instance id */); Status copyStatus = null; lock(); try { copyStatus = new Status(queryStatus); } finally { unlock(); } if (!copyStatus.ok()) { if (Strings.isNullOrEmpty(copyStatus.getErrorMsg())) { copyStatus.rewriteErrorMsg(); } if (copyStatus.isRpcError()) { throw new RpcException("unknown", copyStatus.getErrorMsg()); } else { String errMsg = copyStatus.getErrorMsg(); LOG.warn("query failed: {}", errMsg); int hostIndex = errMsg.indexOf("host"); if (hostIndex != -1) { errMsg = errMsg.substring(0, hostIndex); } throw new UserException(errMsg); } } if (resultBatch.isEos()) { this.returnedAllResults = true; Long numLimitRows = fragments.get(0).getPlanRoot().getLimit(); boolean hasLimit = numLimitRows > 0; if (!isBlockQuery && instanceIds.size() > 1 && hasLimit && numReceivedRows >= numLimitRows) { LOG.debug("no block query, return num >= limit rows, need cancel"); cancelInternal(PPlanFragmentCancelReason.LIMIT_REACH); } } else { numReceivedRows += resultBatch.getBatch().getRowsSize(); } return resultBatch; } public void cancel() { lock(); try { if (!queryStatus.ok()) { return; } else { queryStatus.setStatus(Status.CANCELLED); } LOG.warn("cancel execution of query, this is outside invoke"); cancelInternal(PPlanFragmentCancelReason.USER_CANCEL); } finally { unlock(); } } private void cancelInternal(PPlanFragmentCancelReason cancelReason) { if (null != receiver) { receiver.cancel(); } cancelRemoteFragmentsAsync(cancelReason); if (profileDoneSignal != null) { profileDoneSignal.countDownToZero(new Status()); LOG.info("unfinished instance: {}", profileDoneSignal.getLeftMarks().stream().map(e->DebugUtil.printId(e.getKey())).toArray()); } } private void cancelRemoteFragmentsAsync(PPlanFragmentCancelReason cancelReason) { for (BackendExecState backendExecState : backendExecStates) { backendExecState.cancelFragmentInstance(cancelReason); } } private TNetworkAddress toRpcHost(TNetworkAddress host) throws Exception { Backend backend = Catalog.getCurrentSystemInfo().getBackendWithBePort( host.getHostname(), host.getPort()); if (backend == null) { throw new UserException("there is no scanNode Backend"); } TNetworkAddress dest = new TNetworkAddress(backend.getHost(), backend.getBeRpcPort()); return dest; } private TNetworkAddress toBrpcHost(TNetworkAddress host) throws Exception { Backend backend = Catalog.getCurrentSystemInfo().getBackendWithBePort( host.getHostname(), host.getPort()); if (backend == null) { throw new UserException("there is no scanNode Backend"); } if (backend.getBrpcPort() < 0) { return null; } return new TNetworkAddress(backend.getHost(), backend.getBrpcPort()); } private boolean containsUnionNode(PlanNode node) { if (node instanceof UnionNode) { return true; } for (PlanNode child : node.getChildren()) { if (child instanceof ExchangeNode) { continue; } else if (child instanceof UnionNode) { return true; } else { return containsUnionNode(child); } } return false; } private boolean containsIntersectNode(PlanNode node) { if (node instanceof IntersectNode) { return true; } for (PlanNode child : node.getChildren()) { if 
(child instanceof ExchangeNode) { continue; } else if (child instanceof IntersectNode) { return true; } else { return containsIntersectNode(child); } } return false; } private boolean containsExceptNode(PlanNode node) { if (node instanceof ExceptNode) { return true; } for (PlanNode child : node.getChildren()) { if (child instanceof ExchangeNode) { continue; } else if (child instanceof ExceptNode) { return true; } else { return containsExceptNode(child); } } return false; } private boolean containsSetOperationNode(PlanNode node) { if (node instanceof SetOperationNode) { return true; } for (PlanNode child : node.getChildren()) { if (child instanceof ExchangeNode) { continue; } else if (child instanceof SetOperationNode) { return true; } else { return containsSetOperationNode(child); } } return false; } private void computeFragmentHosts() throws Exception { for (int i = fragments.size() - 1; i >= 0; --i) { PlanFragment fragment = fragments.get(i); FragmentExecParams params = fragmentExecParamsMap.get(fragment.getFragmentId()); if (fragment.getDataPartition() == DataPartition.UNPARTITIONED) { Reference<Long> backendIdRef = new Reference<Long>(); TNetworkAddress execHostport = SimpleScheduler.getHost(this.idToBackend, backendIdRef); if (execHostport == null) { LOG.warn("DataPartition UNPARTITIONED, no scanNode Backend"); throw new UserException("there is no scanNode Backend"); } this.addressToBackendID.put(execHostport, backendIdRef.getRef()); FInstanceExecParam instanceParam = new FInstanceExecParam(null, execHostport, 0, params); params.instanceExecParams.add(instanceParam); continue; } Pair<PlanNode, PlanNode> pairNodes = findLeftmostNode(fragment.getPlanRoot()); PlanNode fatherNode = pairNodes.first; PlanNode leftMostNode = pairNodes.second; /* * Case A: * if the left most is ScanNode, which means there is no child fragment, * we should assign fragment instances on every scan node hosts. * Case B: * if not, there should be exchange nodes to collect all data from child fragments(input fragments), * so we should assign fragment instances corresponding to the child fragments' host */ if (!(leftMostNode instanceof ScanNode)) { int inputFragmentIndex = 0; int maxParallelism = 0; int childrenCount = (fatherNode != null) ? 
fatherNode.getChildren().size() : 1; for (int j = 0; j < childrenCount; j++) { int currentChildFragmentParallelism = fragmentExecParamsMap.get(fragment.getChild(j).getFragmentId()).instanceExecParams.size(); if (currentChildFragmentParallelism > maxParallelism) { maxParallelism = currentChildFragmentParallelism; inputFragmentIndex = j; } } PlanFragmentId inputFragmentId = fragment.getChild(inputFragmentIndex).getFragmentId(); int exchangeInstances = -1; if (ConnectContext.get() != null && ConnectContext.get().getSessionVariable() != null) { exchangeInstances = ConnectContext.get().getSessionVariable().getExchangeInstanceParallel(); } if (exchangeInstances > 0 && fragmentExecParamsMap.get(inputFragmentId).instanceExecParams.size() > exchangeInstances) { Set<TNetworkAddress> hostSet = Sets.newHashSet(); for (FInstanceExecParam execParams: fragmentExecParamsMap.get(inputFragmentId).instanceExecParams) { hostSet.add(execParams.host); } List<TNetworkAddress> hosts = Lists.newArrayList(hostSet); Collections.shuffle(hosts, instanceRandom); for (int index = 0; index < exchangeInstances; index++) { FInstanceExecParam instanceParam = new FInstanceExecParam(null, hosts.get(index % hosts.size()), 0, params); params.instanceExecParams.add(instanceParam); } } else { for (FInstanceExecParam execParams: fragmentExecParamsMap.get(inputFragmentId).instanceExecParams) { FInstanceExecParam instanceParam = new FInstanceExecParam(null, execParams.host, 0, params); params.instanceExecParams.add(instanceParam); } } Collections.shuffle(params.instanceExecParams, instanceRandom); continue; } int parallelExecInstanceNum = fragment.getParallelExecNum(); if ((isColocateJoin(fragment.getPlanRoot()) && fragmentIdToSeqToAddressMap.containsKey(fragment.getFragmentId()) && fragmentIdToSeqToAddressMap.get(fragment.getFragmentId()).size() > 0)) { computeColocateJoinInstanceParam(fragment.getFragmentId(), parallelExecInstanceNum, params); } else if (bucketShuffleJoinController.isBucketShuffleJoin(fragment.getFragmentId().asInt())) { bucketShuffleJoinController.computeInstanceParam(fragment.getFragmentId(), parallelExecInstanceNum, params); } else { Iterator iter = fragmentExecParamsMap.get(fragment.getFragmentId()).scanRangeAssignment.entrySet().iterator(); while (iter.hasNext()) { Map.Entry entry = (Map.Entry) iter.next(); TNetworkAddress key = (TNetworkAddress) entry.getKey(); Map<Integer, List<TScanRangeParams>> value = (Map<Integer, List<TScanRangeParams>>) entry.getValue(); for (Integer planNodeId : value.keySet()) { List<TScanRangeParams> perNodeScanRanges = value.get(planNodeId); int expectedInstanceNum = 1; if (parallelExecInstanceNum > 1) { expectedInstanceNum = Math.min(perNodeScanRanges.size(), parallelExecInstanceNum); } List<List<TScanRangeParams>> perInstanceScanRanges = ListUtil.splitBySize(perNodeScanRanges, expectedInstanceNum); for (List<TScanRangeParams> scanRangeParams : perInstanceScanRanges) { FInstanceExecParam instanceParam = new FInstanceExecParam(null, key, 0, params); instanceParam.perNodeScanRanges.put(planNodeId, scanRangeParams); params.instanceExecParams.add(instanceParam); } } } } if (params.instanceExecParams.isEmpty()) { Reference<Long> backendIdRef = new Reference<Long>(); TNetworkAddress execHostport = SimpleScheduler.getHost(this.idToBackend, backendIdRef); if (execHostport == null) { throw new UserException("there is no scanNode Backend"); } this.addressToBackendID.put(execHostport, backendIdRef.getRef()); FInstanceExecParam instanceParam = new FInstanceExecParam(null, execHostport, 0, 
params); params.instanceExecParams.add(instanceParam); } } } private boolean isColocateJoin(PlanNode node) { if (Config.disable_colocate_join) { return false; } if (ConnectContext.get() != null) { if (ConnectContext.get().getSessionVariable().isDisableColocateJoin()) { return false; } } if (colocateFragmentIds.contains(node.getFragmentId().asInt())) { return true; } if (node instanceof HashJoinNode) { HashJoinNode joinNode = (HashJoinNode) node; if (joinNode.isColocate()) { colocateFragmentIds.add(joinNode.getFragmentId().asInt()); return true; } } for (PlanNode childNode : node.getChildren()) { return isColocateJoin(childNode); } return false; } private Pair<PlanNode, PlanNode> findLeftmostNode(PlanNode plan) { PlanNode newPlan = plan; PlanNode fatherPlan = null; while (newPlan.getChildren().size() != 0 && !(newPlan instanceof ExchangeNode)) { fatherPlan = newPlan; newPlan = newPlan.getChild(0); } return new Pair<PlanNode, PlanNode>(fatherPlan, newPlan); } private <K, V> V findOrInsert(HashMap<K, V> m, final K key, final V defaultVal) { V value = m.get(key); if (value == null) { m.put(key, defaultVal); value = defaultVal; } return value; } private List<TScanRangeParams> findOrInsert(Map<Integer, List<TScanRangeParams>> m, Integer key, ArrayList<TScanRangeParams> defaultVal) { List<TScanRangeParams> value = m.get(key); if (value == null) { m.put(key, defaultVal); value = defaultVal; } return value; } private long getScanRangeLength(final TScanRange scanRange) { return 1; } private void computeColocateJoinInstanceParam(PlanFragmentId fragmentId, int parallelExecInstanceNum, FragmentExecParams params) { Map<Integer, TNetworkAddress> bucketSeqToAddress = fragmentIdToSeqToAddressMap.get(fragmentId); Map<TNetworkAddress, List<Map<Integer, List<TScanRangeParams>>>> addressToScanRanges = Maps.newHashMap(); for (Map.Entry<Integer, Map<Integer, List<TScanRangeParams>>> scanRanges : bucketSeqToScanRange.entrySet()) { TNetworkAddress address = bucketSeqToAddress.get(scanRanges.getKey()); Map<Integer, List<TScanRangeParams>> nodeScanRanges = scanRanges.getValue(); if (!addressToScanRanges.containsKey(address)) { addressToScanRanges.put(address, Lists.newArrayList()); } addressToScanRanges.get(address).add(nodeScanRanges); } for (Map.Entry<TNetworkAddress, List<Map<Integer, List<TScanRangeParams>>>> addressScanRange : addressToScanRanges.entrySet()) { List<Map<Integer, List<TScanRangeParams>>> scanRange = addressScanRange.getValue(); int expectedInstanceNum = 1; if (parallelExecInstanceNum > 1) { expectedInstanceNum = Math.min(scanRange.size(), parallelExecInstanceNum); } List<List<Map<Integer, List<TScanRangeParams>>>> perInstanceScanRanges = ListUtil.splitBySize(scanRange, expectedInstanceNum); for (List<Map<Integer, List<TScanRangeParams>>> perInstanceScanRange : perInstanceScanRanges) { FInstanceExecParam instanceParam = new FInstanceExecParam(null, addressScanRange.getKey(), 0, params); for (Map<Integer, List<TScanRangeParams>> nodeScanRangeMap : perInstanceScanRange) { for (Map.Entry<Integer, List<TScanRangeParams>> nodeScanRange : nodeScanRangeMap.entrySet()) { if (!instanceParam.perNodeScanRanges.containsKey(nodeScanRange.getKey())) { instanceParam.perNodeScanRanges.put(nodeScanRange.getKey(), nodeScanRange.getValue()); } else { instanceParam.perNodeScanRanges.get(nodeScanRange.getKey()).addAll(nodeScanRange.getValue()); } } } params.instanceExecParams.add(instanceParam); } } } private void computeScanRangeAssignment() throws Exception { for (ScanNode scanNode : scanNodes) { 
List<TScanRangeLocations> locations = scanNode.getScanRangeLocations(0); if (locations == null) { continue; } FragmentScanRangeAssignment assignment = fragmentExecParamsMap.get(scanNode.getFragmentId()).scanRangeAssignment; if (isColocateJoin(scanNode.getFragment().getPlanRoot())) { computeScanRangeAssignmentByColocate((OlapScanNode) scanNode, assignment); } else if (bucketShuffleJoinController.isBucketShuffleJoin(scanNode.getFragmentId().asInt(), scanNode.getFragment().getPlanRoot())) { bucketShuffleJoinController.computeScanRangeAssignmentByBucket((OlapScanNode) scanNode, idToBackend, addressToBackendID); } else { computeScanRangeAssignmentByScheduler(scanNode, locations, assignment); } } } private void computeScanRangeAssignmentByColocate( final OlapScanNode scanNode, FragmentScanRangeAssignment assignment) throws Exception { if (!fragmentIdToSeqToAddressMap.containsKey(scanNode.getFragmentId())) { fragmentIdToSeqToAddressMap.put(scanNode.getFragmentId(), new HashedMap()); } Map<Integer, TNetworkAddress> bucketSeqToAddress = fragmentIdToSeqToAddressMap.get(scanNode.getFragmentId()); for(Integer bucketSeq: scanNode.bucketSeq2locations.keySet()) { List<TScanRangeLocations> locations = scanNode.bucketSeq2locations.get(bucketSeq); if (!bucketSeqToAddress.containsKey(bucketSeq)) { getExecHostPortForFragmentIDAndBucketSeq(locations.get(0), scanNode.getFragmentId(), bucketSeq); } for(TScanRangeLocations location: locations) { Map<Integer, List<TScanRangeParams>> scanRanges = findOrInsert(bucketSeqToScanRange, bucketSeq, new HashMap<Integer, List<TScanRangeParams>>()); List<TScanRangeParams> scanRangeParamsList = findOrInsert(scanRanges, scanNode.getId().asInt(), new ArrayList<TScanRangeParams>()); TScanRangeParams scanRangeParams = new TScanRangeParams(); scanRangeParams.scan_range = location.scan_range; scanRangeParamsList.add(scanRangeParams); } } } private void getExecHostPortForFragmentIDAndBucketSeq(TScanRangeLocations seqLocation, PlanFragmentId fragmentId, Integer bucketSeq) throws Exception { int randomLocation = new Random().nextInt(seqLocation.locations.size()); Reference<Long> backendIdRef = new Reference<Long>(); TNetworkAddress execHostPort = SimpleScheduler.getHost(seqLocation.locations.get(randomLocation).backend_id, seqLocation.locations, this.idToBackend, backendIdRef); if (execHostPort == null) { throw new UserException("there is no scanNode Backend"); } this.addressToBackendID.put(execHostPort, backendIdRef.getRef()); this.fragmentIdToSeqToAddressMap.get(fragmentId).put(bucketSeq, execHostPort); } private void computeScanRangeAssignmentByScheduler( final ScanNode scanNode, final List<TScanRangeLocations> locations, FragmentScanRangeAssignment assignment) throws Exception { HashMap<TNetworkAddress, Long> assignedBytesPerHost = Maps.newHashMap(); for (TScanRangeLocations scanRangeLocations : locations) { Long minAssignedBytes = Long.MAX_VALUE; TScanRangeLocation minLocation = null; for (final TScanRangeLocation location : scanRangeLocations.getLocations()) { Long assignedBytes = findOrInsert(assignedBytesPerHost, location.server, 0L); if (assignedBytes < minAssignedBytes) { minAssignedBytes = assignedBytes; minLocation = location; } } Long scanRangeLength = getScanRangeLength(scanRangeLocations.scan_range); assignedBytesPerHost.put(minLocation.server, assignedBytesPerHost.get(minLocation.server) + scanRangeLength); Reference<Long> backendIdRef = new Reference<Long>(); TNetworkAddress execHostPort = SimpleScheduler.getHost(minLocation.backend_id, 
scanRangeLocations.getLocations(), this.idToBackend, backendIdRef); if (execHostPort == null) { throw new UserException("there is no scanNode Backend"); } this.addressToBackendID.put(execHostPort, backendIdRef.getRef()); Map<Integer, List<TScanRangeParams>> scanRanges = findOrInsert(assignment, execHostPort, new HashMap<Integer, List<TScanRangeParams>>()); List<TScanRangeParams> scanRangeParamsList = findOrInsert(scanRanges, scanNode.getId().asInt(), new ArrayList<TScanRangeParams>()); TScanRangeParams scanRangeParams = new TScanRangeParams(); scanRangeParams.scan_range = scanRangeLocations.scan_range; scanRangeParams.setVolumeId(minLocation.volume_id); scanRangeParamsList.add(scanRangeParams); } } public void updateFragmentExecStatus(TReportExecStatusParams params) { if (params.backend_num >= backendExecStates.size()) { LOG.warn("unknown backend number: {}, expected less than: {}", params.backend_num, backendExecStates.size()); return; } BackendExecState execState = backendExecStates.get(params.backend_num); if (!execState.updateProfile(params)) { return; } if (LOG.isDebugEnabled()) { StringBuilder builder = new StringBuilder(); execState.printProfile(builder); LOG.debug("profile for query_id={} instance_id={}\n{}", DebugUtil.printId(queryId), DebugUtil.printId(params.getFragmentInstanceId()), builder.toString()); } Status status = new Status(params.status); if (!(returnedAllResults && status.isCancelled()) && !status.ok()) { LOG.warn("one instance report fail, query_id={} instance_id={}", DebugUtil.printId(queryId), DebugUtil.printId(params.getFragmentInstanceId())); updateStatus(status, params.getFragmentInstanceId()); } if (execState.done) { if (params.isSetDeltaUrls()) { updateDeltas(params.getDeltaUrls()); } if (params.isSetLoadCounters()) { updateLoadCounters(params.getLoadCounters()); } if (params.isSetTrackingUrl()) { trackingUrl = params.getTrackingUrl(); } if (params.isSetExportFiles()) { updateExportFiles(params.getExportFiles()); } if (params.isSetCommitInfos()) { updateCommitInfos(params.getCommitInfos()); } profileDoneSignal.markedCountDown(params.getFragmentInstanceId(), -1L); } if (params.isSetLoadedRows()) { Catalog.getCurrentCatalog().getLoadManager().updateJobPrgress( jobId, params.backend_id, params.query_id, params.fragment_instance_id, params.loaded_rows, params.done); } return; } public void endProfile() { if (backendExecStates.isEmpty()) { return; } if (needReport) { try { profileDoneSignal.await(2, TimeUnit.SECONDS); } catch (InterruptedException e1) { LOG.warn("signal await error", e1); } } for (int i = 1; i < fragmentProfile.size(); ++i) { fragmentProfile.get(i).sortChildren(); } } /* * Waiting the coordinator finish executing. * return false if waiting timeout. * return true otherwise. * NOTICE: return true does not mean that coordinator executed success, * the caller should check queryStatus for result. * * We divide the entire waiting process into multiple rounds, * with a maximum of 30 seconds per round. And after each round of waiting, * check the status of the BE. If the BE status is abnormal, the wait is ended * and the result is returned. Otherwise, continue to the next round of waiting. * This method mainly avoids the problem that the Coordinator waits for a long time * after some BE can no long return the result due to some exception, such as BE is down. 
*/ public boolean join(int timeoutS) { final long fixedMaxWaitTime = 30; long leftTimeoutS = timeoutS; while (leftTimeoutS > 0) { long waitTime = Math.min(leftTimeoutS, fixedMaxWaitTime); boolean awaitRes = false; try { awaitRes = profileDoneSignal.await(waitTime, TimeUnit.SECONDS); } catch (InterruptedException e) { } if (awaitRes) { return true; } if (!checkBackendState()) { return true; } leftTimeoutS -= waitTime; } return false; } /* * Check the state of backends in needCheckBackendExecStates. * return true if all of them are OK. Otherwise, return false. */ private boolean checkBackendState() { for (BackendExecState backendExecState : needCheckBackendExecStates) { if (!backendExecState.isBackendStateHealthy()) { queryStatus = new Status(TStatusCode.INTERNAL_ERROR, "backend " + backendExecState.backend.getId() + " is down"); return false; } } return true; } public boolean isDone() { return profileDoneSignal.getCount() == 0; } class FragmentScanRangeAssignment extends HashMap<TNetworkAddress, Map<Integer, List<TScanRangeParams>>> { } class BucketSeqToScanRange extends HashMap<Integer, Map<Integer, List<TScanRangeParams>>> { } class BucketShuffleJoinController { private Map<PlanFragmentId, BucketSeqToScanRange> fragmentIdBucketSeqToScanRangeMap = Maps.newHashMap(); private Map<PlanFragmentId, Map<Integer, TNetworkAddress>> fragmentIdToSeqToAddressMap = Maps.newHashMap(); private Map<PlanFragmentId, Map<Long, Integer>> fragmentIdToBuckendIdBucketCountMap = Maps.newHashMap(); private Map<PlanFragmentId, Integer> fragmentIdToBucketNumMap = Maps.newHashMap(); private Set<Integer> bucketShuffleFragmentIds = new HashSet<>(); private boolean isBucketShuffleJoin(int fragmentId, PlanNode node) { if (ConnectContext.get() != null) { if (!ConnectContext.get().getSessionVariable().isEnableBucketShuffleJoin()) { return false; } } if (fragmentId != node.getFragmentId().asInt()) { return false; } if (bucketShuffleFragmentIds.contains(fragmentId)) { return true; } if (node instanceof HashJoinNode) { HashJoinNode joinNode = (HashJoinNode) node; if (joinNode.isBucketShuffle()) { bucketShuffleFragmentIds.add(joinNode.getFragmentId().asInt()); return true; } } for (PlanNode childNode : node.getChildren()) { return isBucketShuffleJoin(fragmentId, childNode); } return false; } private boolean isBucketShuffleJoin(int fragmentId) { return bucketShuffleFragmentIds.contains(fragmentId); } private int getFragmentBucketNum(PlanFragmentId fragmentId) { return fragmentIdToBucketNumMap.get(fragmentId); } private void getExecHostPortForFragmentIDAndBucketSeq(TScanRangeLocations seqLocation, PlanFragmentId fragmentId, Integer bucketSeq, ImmutableMap<Long, Backend> idToBackend, Map<TNetworkAddress, Long> addressToBackendID) throws Exception { Map<Long, Integer> buckendIdToBucketCountMap = fragmentIdToBuckendIdBucketCountMap.get(fragmentId); int maxBucketNum = Integer.MAX_VALUE; long buckendId = Long.MAX_VALUE; for (TScanRangeLocation location : seqLocation.locations) { if (buckendIdToBucketCountMap.containsKey(location.backend_id)) { if (buckendIdToBucketCountMap.get(location.backend_id) < maxBucketNum) { maxBucketNum = buckendIdToBucketCountMap.get(location.backend_id); buckendId = location.backend_id; } } else { maxBucketNum = 0; buckendId = location.backend_id; buckendIdToBucketCountMap.put(buckendId, 0); break; } } buckendIdToBucketCountMap.put(buckendId, buckendIdToBucketCountMap.get(buckendId) + 1); Reference<Long> backendIdRef = new Reference<Long>(); TNetworkAddress execHostPort = SimpleScheduler.getHost(buckendId, 
seqLocation.locations, idToBackend, backendIdRef); if (execHostPort == null) { throw new UserException("there is no scanNode Backend"); } addressToBackendID.put(execHostPort, backendIdRef.getRef()); this.fragmentIdToSeqToAddressMap.get(fragmentId).put(bucketSeq, execHostPort); } private void computeScanRangeAssignmentByBucket( final OlapScanNode scanNode, ImmutableMap<Long, Backend> idToBackend, Map<TNetworkAddress, Long> addressToBackendID) throws Exception { if (!fragmentIdToSeqToAddressMap.containsKey(scanNode.getFragmentId())) { fragmentIdToBucketNumMap.put(scanNode.getFragmentId(), scanNode.getOlapTable().getDefaultDistributionInfo().getBucketNum()); fragmentIdToSeqToAddressMap.put(scanNode.getFragmentId(), new HashedMap()); fragmentIdBucketSeqToScanRangeMap.put(scanNode.getFragmentId(), new BucketSeqToScanRange()); fragmentIdToBuckendIdBucketCountMap.put(scanNode.getFragmentId(), new HashMap<>()); } Map<Integer, TNetworkAddress> bucketSeqToAddress = fragmentIdToSeqToAddressMap.get(scanNode.getFragmentId()); BucketSeqToScanRange bucketSeqToScanRange = fragmentIdBucketSeqToScanRangeMap.get(scanNode.getFragmentId()); for(Integer bucketSeq: scanNode.bucketSeq2locations.keySet()) { List<TScanRangeLocations> locations = scanNode.bucketSeq2locations.get(bucketSeq); if (!bucketSeqToAddress.containsKey(bucketSeq)) { getExecHostPortForFragmentIDAndBucketSeq(locations.get(0), scanNode.getFragmentId(), bucketSeq, idToBackend, addressToBackendID); } for(TScanRangeLocations location: locations) { Map<Integer, List<TScanRangeParams>> scanRanges = findOrInsert(bucketSeqToScanRange, bucketSeq, new HashMap<Integer, List<TScanRangeParams>>()); List<TScanRangeParams> scanRangeParamsList = findOrInsert(scanRanges, scanNode.getId().asInt(), new ArrayList<TScanRangeParams>()); TScanRangeParams scanRangeParams = new TScanRangeParams(); scanRangeParams.scan_range = location.scan_range; scanRangeParamsList.add(scanRangeParams); } } } private void computeInstanceParam(PlanFragmentId fragmentId, int parallelExecInstanceNum, FragmentExecParams params) { Map<Integer, TNetworkAddress> bucketSeqToAddress = fragmentIdToSeqToAddressMap.get(fragmentId); BucketSeqToScanRange bucketSeqToScanRange = fragmentIdBucketSeqToScanRangeMap.get(fragmentId); Map<TNetworkAddress, List<Map.Entry<Integer, Map<Integer, List<TScanRangeParams>>>>> addressToScanRanges = Maps.newHashMap(); for (Map.Entry<Integer, Map<Integer, List<TScanRangeParams>>> scanRanges : bucketSeqToScanRange.entrySet()) { TNetworkAddress address = bucketSeqToAddress.get(scanRanges.getKey()); Map<Integer, List<TScanRangeParams>> nodeScanRanges = scanRanges.getValue(); if (!addressToScanRanges.containsKey(address)) { addressToScanRanges.put(address, Lists.newArrayList()); } addressToScanRanges.get(address).add(scanRanges); } for (Map.Entry<TNetworkAddress, List<Map.Entry<Integer, Map<Integer, List<TScanRangeParams>>>>> addressScanRange : addressToScanRanges.entrySet()) { List<Map.Entry<Integer, Map<Integer, List<TScanRangeParams>>>> scanRange = addressScanRange.getValue(); int expectedInstanceNum = 1; if (parallelExecInstanceNum > 1) { expectedInstanceNum = Math.min(scanRange.size(), parallelExecInstanceNum); } List<List<Map.Entry<Integer, Map<Integer, List<TScanRangeParams>>>>> perInstanceScanRanges = ListUtil.splitBySize(scanRange, expectedInstanceNum); for (List<Map.Entry<Integer, Map<Integer, List<TScanRangeParams>>>> perInstanceScanRange : perInstanceScanRanges) { FInstanceExecParam instanceParam = new FInstanceExecParam(null, addressScanRange.getKey(), 0, 
params); for (Map.Entry<Integer, Map<Integer, List<TScanRangeParams>>> nodeScanRangeMap : perInstanceScanRange) { instanceParam.addBucketSeq(nodeScanRangeMap.getKey()); for (Map.Entry<Integer, List<TScanRangeParams>> nodeScanRange : nodeScanRangeMap.getValue().entrySet()) { if (!instanceParam.perNodeScanRanges.containsKey(nodeScanRange.getKey())) { instanceParam.perNodeScanRanges.put(nodeScanRange.getKey(), nodeScanRange.getValue()); } else { instanceParam.perNodeScanRanges.get(nodeScanRange.getKey()).addAll(nodeScanRange.getValue()); } } } params.instanceExecParams.add(instanceParam); } } } } private BucketShuffleJoinController bucketShuffleJoinController = new BucketShuffleJoinController(); private BucketSeqToScanRange bucketSeqToScanRange = new BucketSeqToScanRange(); private Map<PlanFragmentId, Map<Integer, TNetworkAddress>> fragmentIdToSeqToAddressMap = Maps.newHashMap(); private Set<Integer> colocateFragmentIds = new HashSet<>(); public class BackendExecState { TExecPlanFragmentParams rpcParams; PlanFragmentId fragmentId; int instanceId; boolean initiated; boolean done; boolean hasCanceled; int profileFragmentId; RuntimeProfile profile; TNetworkAddress address; Backend backend; long lastMissingHeartbeatTime = -1; public BackendExecState(PlanFragmentId fragmentId, int instanceId, int profileFragmentId, TExecPlanFragmentParams rpcParams, Map<TNetworkAddress, Long> addressToBackendID) { this.profileFragmentId = profileFragmentId; this.fragmentId = fragmentId; this.instanceId = instanceId; this.rpcParams = rpcParams; this.initiated = false; this.done = false; this.address = fragmentExecParamsMap.get(fragmentId).instanceExecParams.get(instanceId).host; this.backend = idToBackend.get(addressToBackendID.get(address)); String name = "Instance " + DebugUtil.printId(fragmentExecParamsMap.get(fragmentId) .instanceExecParams.get(instanceId).instanceId) + " (host=" + address + ")"; this.profile = new RuntimeProfile(name); this.hasCanceled = false; this.lastMissingHeartbeatTime = backend.getLastMissingHeartbeatTime(); } public synchronized boolean updateProfile(TReportExecStatusParams params) { if (this.done) { return false; } if (params.isSetProfile()) { profile.update(params.profile); } this.done = params.done; return true; } public synchronized void printProfile(StringBuilder builder) { this.profile.computeTimeInProfile(); this.profile.prettyPrint(builder, ""); } public synchronized boolean cancelFragmentInstance(PPlanFragmentCancelReason cancelReason) { if (LOG.isDebugEnabled()) { LOG.debug("cancelRemoteFragments initiated={} done={} hasCanceled={} backend: {}, fragment instance id={}, reason: {}", this.initiated, this.done, this.hasCanceled, backend.getId(), DebugUtil.printId(fragmentInstanceId()), cancelReason.name()); } try { if (!this.initiated) { return false; } if (this.done) { return false; } if (this.hasCanceled) { return false; } TNetworkAddress brpcAddress = toBrpcHost(address); try { BackendServiceProxy.getInstance().cancelPlanFragmentAsync(brpcAddress, fragmentInstanceId(), cancelReason); } catch (RpcException e) { LOG.warn("cancel plan fragment get a exception, address={}:{}", brpcAddress.getHostname(), brpcAddress.getPort()); SimpleScheduler.addToBlacklist(addressToBackendID.get(brpcAddress)); } this.hasCanceled = true; } catch (Exception e) { LOG.warn("catch a exception", e); return false; } return true; } public synchronized boolean computeTimeInProfile(int maxFragmentId) { if (this.profileFragmentId < 0 || this.profileFragmentId > maxFragmentId) { LOG.warn("profileFragmentId 
{} should be in [0, {})", profileFragmentId, maxFragmentId); return false; } profile.computeTimeInProfile(); return true; } public boolean isBackendStateHealthy() { if (backend.getLastMissingHeartbeatTime() > lastMissingHeartbeatTime) { LOG.warn("backend {} is down while joining the coordinator. job id: {}", backend.getId(), jobId); return false; } return true; } public Future<PExecPlanFragmentResult> execRemoteFragmentAsync() throws TException, RpcException { TNetworkAddress brpcAddress = null; try { brpcAddress = new TNetworkAddress(backend.getHost(), backend.getBrpcPort()); } catch (Exception e) { throw new TException(e.getMessage()); } this.initiated = true; try { return BackendServiceProxy.getInstance().execPlanFragmentAsync(brpcAddress, rpcParams); } catch (RpcException e) { return new Future<PExecPlanFragmentResult>() { @Override public boolean cancel(boolean mayInterruptIfRunning) { return false; } @Override public boolean isCancelled() { return false; } @Override public boolean isDone() { return true; } @Override public PExecPlanFragmentResult get() { PExecPlanFragmentResult result = new PExecPlanFragmentResult(); PStatus pStatus = new PStatus(); pStatus.error_msgs = Lists.newArrayList(); pStatus.error_msgs.add(e.getMessage()); pStatus.status_code = TStatusCode.THRIFT_RPC_ERROR.getValue(); result.status = pStatus; return result; } @Override public PExecPlanFragmentResult get(long timeout, TimeUnit unit) { return get(); } }; } } public FragmentInstanceInfo buildFragmentInstanceInfo() { return new QueryStatisticsItem.FragmentInstanceInfo.Builder() .instanceId(fragmentInstanceId()).fragmentId(String.valueOf(fragmentId)).address(this.address) .build(); } private TUniqueId fragmentInstanceId() { return this.rpcParams.params.getFragmentInstanceId(); } } protected class FragmentExecParams { public PlanFragment fragment; public List<TPlanFragmentDestination> destinations = Lists.newArrayList(); public Map<Integer, Integer> perExchNumSenders = Maps.newHashMap(); public List<PlanFragmentId> inputFragments = Lists.newArrayList(); public List<FInstanceExecParam> instanceExecParams = Lists.newArrayList(); public FragmentScanRangeAssignment scanRangeAssignment = new FragmentScanRangeAssignment(); public FragmentExecParams(PlanFragment fragment) { this.fragment = fragment; } List<TExecPlanFragmentParams> toThrift(int backendNum) { List<TExecPlanFragmentParams> paramsList = Lists.newArrayList(); for (int i = 0; i < instanceExecParams.size(); ++i) { final FInstanceExecParam instanceExecParam = instanceExecParams.get(i); TExecPlanFragmentParams params = new TExecPlanFragmentParams(); params.setProtocolVersion(PaloInternalServiceVersion.V1); params.setFragment(fragment.toThrift()); params.setDescTbl(descTable); params.setParams(new TPlanFragmentExecParams()); params.setResourceInfo(tResourceInfo); params.params.setQueryId(queryId); params.params.setFragmentInstanceId(instanceExecParam.instanceId); Map<Integer, List<TScanRangeParams>> scanRanges = instanceExecParam.perNodeScanRanges; if (scanRanges == null) { scanRanges = Maps.newHashMap(); } params.params.setPerNodeScanRanges(scanRanges); params.params.setPerExchNumSenders(perExchNumSenders); params.params.setDestinations(destinations); params.params.setSenderId(i); params.params.setNumSenders(instanceExecParams.size()); params.setCoord(coordAddress); params.setBackendNum(backendNum++); params.setQueryGlobals(queryGlobals); params.setQueryOptions(queryOptions); params.params.setSendQueryStatisticsWithEveryBatch( 
fragment.isTransferQueryStatisticsWithEveryBatch()); if (queryOptions.getQueryType() == TQueryType.LOAD) { LoadErrorHub.Param param = Catalog.getCurrentCatalog().getLoadInstance().getLoadErrorHubInfo(); if (param != null) { TLoadErrorHubInfo info = param.toThrift(); if (info != null) { params.setLoadErrorHubInfo(info); } } } paramsList.add(params); } return paramsList; } public void appendScanRange(StringBuilder sb, List<TScanRangeParams> params) { sb.append("range=["); int idx = 0; for (TScanRangeParams range : params) { TPaloScanRange paloScanRange = range.getScanRange().getPaloScanRange(); if (paloScanRange != null) { if (idx++ != 0) { sb.append(","); } sb.append("{tid=").append(paloScanRange.getTabletId()) .append(",ver=").append(paloScanRange.getVersion()).append("}"); } TEsScanRange esScanRange = range.getScanRange().getEsScanRange(); if (esScanRange != null) { sb.append("{ index=").append(esScanRange.getIndex()) .append(", shardid=").append(esScanRange.getShardId()) .append("}"); } } sb.append("]"); } public void appendTo(StringBuilder sb) { sb.append("{plan="); fragment.getPlanRoot().appendTrace(sb); sb.append(",instance=["); for (int i = 0; i < instanceExecParams.size(); ++i) { if (i != 0) { sb.append(","); } TNetworkAddress address = instanceExecParams.get(i).host; Map<Integer, List<TScanRangeParams>> scanRanges = scanRangeAssignment.get(address); sb.append("{"); sb.append("id=").append(DebugUtil.printId(instanceExecParams.get(i).instanceId)); sb.append(",host=").append(instanceExecParams.get(i).host); if (scanRanges == null) { sb.append("}"); continue; } sb.append(",range=["); int eIdx = 0; for (Map.Entry<Integer, List<TScanRangeParams>> entry : scanRanges.entrySet()) { if (eIdx++ != 0) { sb.append(","); } sb.append("id").append(entry.getKey()).append(","); appendScanRange(sb, entry.getValue()); } sb.append("]"); sb.append("}"); } sb.append("]"); sb.append("}"); } }
class Coordinator { private static final Logger LOG = LogManager.getLogger(Coordinator.class); private static final DateFormat DATE_FORMAT = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); private static String localIP = FrontendOptions.getLocalHostAddress(); private static Random instanceRandom = new Random(); Status queryStatus = new Status(); Map<TNetworkAddress, Long> addressToBackendID = Maps.newHashMap(); private ImmutableMap<Long, Backend> idToBackend = ImmutableMap.of(); private TDescriptorTable descTable; private TQueryGlobals queryGlobals = new TQueryGlobals(); private TQueryOptions queryOptions; private TNetworkAddress coordAddress; private Lock lock = new ReentrantLock(); private boolean returnedAllResults; private RuntimeProfile queryProfile; private List<RuntimeProfile> fragmentProfile; private Map<PlanFragmentId, FragmentExecParams> fragmentExecParamsMap = Maps.newHashMap(); private List<PlanFragment> fragments; private List<BackendExecState> backendExecStates = Lists.newArrayList(); private List<BackendExecState> needCheckBackendExecStates = Lists.newArrayList(); private ResultReceiver receiver; private List<ScanNode> scanNodes; private Set<TUniqueId> instanceIds = Sets.newHashSet(); private MarkedCountDownLatch<TUniqueId, Long> profileDoneSignal; private boolean isBlockQuery; private int numReceivedRows = 0; private List<String> deltaUrls; private Map<String, String> loadCounters; private String trackingUrl; private List<String> exportFiles; private List<TTabletCommitInfo> commitInfos = Lists.newArrayList(); private long jobId = -1; private TUniqueId queryId; private TResourceInfo tResourceInfo; private boolean needReport; private String clusterName; private final TUniqueId nextInstanceId; public Coordinator(ConnectContext context, Analyzer analyzer, Planner planner) { this.isBlockQuery = planner.isBlockQuery(); this.queryId = context.queryId(); this.fragments = planner.getFragments(); this.scanNodes = planner.getScanNodes(); this.descTable = analyzer.getDescTbl().toThrift(); this.returnedAllResults = false; this.queryOptions = context.getSessionVariable().toThrift(); this.queryGlobals.setNowString(DATE_FORMAT.format(new Date())); this.queryGlobals.setTimestampMs(new Date().getTime()); if (context.getSessionVariable().getTimeZone().equals("CST")) { this.queryGlobals.setTimeZone(TimeUtils.DEFAULT_TIME_ZONE); } else { this.queryGlobals.setTimeZone(context.getSessionVariable().getTimeZone()); } this.tResourceInfo = new TResourceInfo(context.getQualifiedUser(), context.getSessionVariable().getResourceGroup()); this.needReport = context.getSessionVariable().isReportSucc(); this.clusterName = context.getClusterName(); this.nextInstanceId = new TUniqueId(); nextInstanceId.setHi(queryId.hi); nextInstanceId.setLo(queryId.lo + 1); } public Coordinator(Long jobId, TUniqueId queryId, DescriptorTable descTable, List<PlanFragment> fragments, List<ScanNode> scanNodes, String cluster, String timezone) { this.isBlockQuery = true; this.jobId = jobId; this.queryId = queryId; this.descTable = descTable.toThrift(); this.fragments = fragments; this.scanNodes = scanNodes; this.queryOptions = new TQueryOptions(); this.queryGlobals.setNowString(DATE_FORMAT.format(new Date())); this.queryGlobals.setTimestampMs(new Date().getTime()); this.queryGlobals.setTimeZone(timezone); this.tResourceInfo = new TResourceInfo("", ""); this.needReport = true; this.clusterName = cluster; this.nextInstanceId = new TUniqueId(); nextInstanceId.setHi(queryId.hi); nextInstanceId.setLo(queryId.lo + 1); } public long 
getJobId() { return jobId; } public TUniqueId getQueryId() { return queryId; } public void setQueryId(TUniqueId queryId) { this.queryId = queryId; } public void setQueryType(TQueryType type) { this.queryOptions.setQueryType(type); } public Status getExecStatus() { return queryStatus; } public RuntimeProfile getQueryProfile() { return queryProfile; } public List<String> getDeltaUrls() { return deltaUrls; } public Map<String, String> getLoadCounters() { return loadCounters; } public String getTrackingUrl() { return trackingUrl; } public void setExecMemoryLimit(long execMemoryLimit) { this.queryOptions.setMemLimit(execMemoryLimit); } public void setLoadMemLimit(long loadMemLimit) { this.queryOptions.setLoadMemLimit(loadMemLimit); } public void setTimeout(int timeout) { this.queryOptions.setQueryTimeout(timeout); } public void clearExportStatus() { lock.lock(); try { this.backendExecStates.clear(); this.queryStatus.setStatus(new Status()); if (this.exportFiles == null) { this.exportFiles = Lists.newArrayList(); } this.exportFiles.clear(); this.needCheckBackendExecStates.clear(); } finally { lock.unlock(); } } public List<TTabletCommitInfo> getCommitInfos() { return commitInfos; } private void prepare() { for (PlanFragment fragment : fragments) { fragmentExecParamsMap.put(fragment.getFragmentId(), new FragmentExecParams(fragment)); } for (PlanFragment fragment : fragments) { if (!(fragment.getSink() instanceof DataStreamSink)) { continue; } FragmentExecParams params = fragmentExecParamsMap.get(fragment.getDestFragment().getFragmentId()); params.inputFragments.add(fragment.getFragmentId()); } coordAddress = new TNetworkAddress(localIP, Config.rpc_port); int fragmentSize = fragments.size(); queryProfile = new RuntimeProfile("Execution Profile " + DebugUtil.printId(queryId)); fragmentProfile = new ArrayList<RuntimeProfile>(); for (int i = 0; i < fragmentSize; i ++) { fragmentProfile.add(new RuntimeProfile("Fragment " + i)); queryProfile.addChild(fragmentProfile.get(i)); } this.idToBackend = Catalog.getCurrentSystemInfo().getIdToBackend(); if (LOG.isDebugEnabled()) { LOG.debug("idToBackend size={}", idToBackend.size()); for (Map.Entry<Long, Backend> entry : idToBackend.entrySet()) { Long backendID = entry.getKey(); Backend backend = entry.getValue(); LOG.debug("backend: {}-{}-{}", backendID, backend.getHost(), backend.getBePort()); } } } private void lock() { lock.lock(); } private void unlock() { lock.unlock(); } private void traceInstance() { if (LOG.isDebugEnabled()) { StringBuilder sb = new StringBuilder(); int idx = 0; sb.append("query id=").append(DebugUtil.printId(queryId)).append(","); sb.append("fragment=["); for (Map.Entry<PlanFragmentId, FragmentExecParams> entry : fragmentExecParamsMap.entrySet()) { if (idx++ != 0) { sb.append(","); } sb.append(entry.getKey()); entry.getValue().appendTo(sb); } sb.append("]"); LOG.debug(sb.toString()); } } public void exec() throws Exception { if (LOG.isDebugEnabled() && !scanNodes.isEmpty()) { LOG.debug("debug: in Coordinator::exec. query id: {}, planNode: {}", DebugUtil.printId(queryId), scanNodes.get(0).treeToThrift()); } if (LOG.isDebugEnabled() && !fragments.isEmpty()) { LOG.debug("debug: in Coordinator::exec. 
query id: {}, fragment: {}", DebugUtil.printId(queryId), fragments.get(0).toThrift()); } prepare(); computeScanRangeAssignment(); computeFragmentExecParams(); traceInstance(); PlanFragmentId topId = fragments.get(0).getFragmentId(); FragmentExecParams topParams = fragmentExecParamsMap.get(topId); if (topParams.fragment.getSink() instanceof ResultSink) { TNetworkAddress execBeAddr = topParams.instanceExecParams.get(0).host; receiver = new ResultReceiver( topParams.instanceExecParams.get(0).instanceId, addressToBackendID.get(execBeAddr), toBrpcHost(execBeAddr), queryOptions.query_timeout * 1000); LOG.info("dispatch query job: {} to {}", DebugUtil.printId(queryId), topParams.instanceExecParams.get(0).host); ResultSink resultSink = (ResultSink) topParams.fragment.getSink(); if (resultSink.isOutputFileSink() && resultSink.needBroker()) { FsBroker broker = Catalog.getCurrentCatalog().getBrokerMgr().getBroker(resultSink.getBrokerName(), execBeAddr.getHostname()); resultSink.setBrokerAddr(broker.ip, broker.port); LOG.info("OUTFILE through broker: {}:{}", broker.ip, broker.port); } } else { this.queryOptions.setIsReportSuccess(true); deltaUrls = Lists.newArrayList(); loadCounters = Maps.newHashMap(); List<Long> relatedBackendIds = Lists.newArrayList(addressToBackendID.values()); Catalog.getCurrentCatalog().getLoadManager().initJobProgress(jobId, queryId, instanceIds, relatedBackendIds); LOG.info("dispatch load job: {} to {}", DebugUtil.printId(queryId), addressToBackendID.keySet()); } profileDoneSignal = new MarkedCountDownLatch<TUniqueId, Long>(instanceIds.size()); for (TUniqueId instanceId : instanceIds) { profileDoneSignal.addMark(instanceId, -1L /* value is meaningless */); } lock(); try { int backendId = 0; int profileFragmentId = 0; long memoryLimit = queryOptions.getMemLimit(); for (PlanFragment fragment : fragments) { FragmentExecParams params = fragmentExecParamsMap.get(fragment.getFragmentId()); int instanceNum = params.instanceExecParams.size(); Preconditions.checkState(instanceNum > 0); List<TExecPlanFragmentParams> tParams = params.toThrift(backendId); List<Pair<BackendExecState, Future<PExecPlanFragmentResult>>> futures = Lists.newArrayList(); if (colocateFragmentIds.contains(fragment.getFragmentId().asInt())) { int rate = Math.min(Config.query_colocate_join_memory_limit_penalty_factor, instanceNum); long newmemory = memoryLimit / rate; for (TExecPlanFragmentParams tParam : tParams) { tParam.query_options.setMemLimit(newmemory); } } boolean needCheckBackendState = false; if (queryOptions.getQueryType() == TQueryType.LOAD && profileFragmentId == 0) { needCheckBackendState = true; } int instanceId = 0; for (TExecPlanFragmentParams tParam : tParams) { BackendExecState execState = new BackendExecState(fragment.getFragmentId(), instanceId++, profileFragmentId, tParam, this.addressToBackendID); backendExecStates.add(execState); if (needCheckBackendState) { needCheckBackendExecStates.add(execState); if (LOG.isDebugEnabled()) { LOG.debug("add need check backend {} for fragment, {} job: {}", execState.backend.getId(), fragment.getFragmentId().asInt(), jobId); } } futures.add(Pair.create(execState, execState.execRemoteFragmentAsync())); backendId++; } for (Pair<BackendExecState, Future<PExecPlanFragmentResult>> pair : futures) { TStatusCode code = TStatusCode.INTERNAL_ERROR; String errMsg = null; try { PExecPlanFragmentResult result = pair.second.get(Config.remote_fragment_exec_timeout_ms, TimeUnit.MILLISECONDS); code = TStatusCode.findByValue(result.status.status_code); if 
(result.status.error_msgs != null && !result.status.error_msgs.isEmpty()) { errMsg = result.status.error_msgs.get(0); } } catch (ExecutionException e) { LOG.warn("catch a execute exception", e); code = TStatusCode.THRIFT_RPC_ERROR; } catch (InterruptedException e) { LOG.warn("catch a interrupt exception", e); code = TStatusCode.INTERNAL_ERROR; } catch (TimeoutException e) { LOG.warn("catch a timeout exception", e); code = TStatusCode.TIMEOUT; } if (code != TStatusCode.OK) { if (errMsg == null) { errMsg = "exec rpc error. backend id: " + pair.first.backend.getId(); } queryStatus.setStatus(errMsg); LOG.warn("exec plan fragment failed, errmsg={}, code: {}, fragmentId={}, backend={}:{}", errMsg, code, fragment.getFragmentId(), pair.first.address.hostname, pair.first.address.port); cancelInternal(PPlanFragmentCancelReason.INTERNAL_ERROR); switch (code) { case TIMEOUT: throw new UserException("query timeout. backend id: " + pair.first.backend.getId()); case THRIFT_RPC_ERROR: SimpleScheduler.addToBlacklist(pair.first.backend.getId()); throw new RpcException(pair.first.backend.getHost(), "rpc failed"); default: throw new UserException(errMsg); } } } profileFragmentId += 1; } attachInstanceProfileToFragmentProfile(); } finally { unlock(); } } public List<String> getExportFiles() { return exportFiles; } void updateExportFiles(List<String> files) { lock.lock(); try { if (exportFiles == null) { exportFiles = Lists.newArrayList(); } exportFiles.addAll(files); } finally { lock.unlock(); } } void updateDeltas(List<String> urls) { lock.lock(); try { deltaUrls.addAll(urls); } finally { lock.unlock(); } } private void updateLoadCounters(Map<String, String> newLoadCounters) { lock.lock(); try { long numRowsNormal = 0L; String value = this.loadCounters.get(LoadEtlTask.DPP_NORMAL_ALL); if (value != null) { numRowsNormal = Long.valueOf(value); } long numRowsAbnormal = 0L; value = this.loadCounters.get(LoadEtlTask.DPP_ABNORMAL_ALL); if (value != null) { numRowsAbnormal = Long.valueOf(value); } long numRowsUnselected = 0L; value = this.loadCounters.get(LoadJob.UNSELECTED_ROWS); if (value != null) { numRowsUnselected = Long.valueOf(value); } value = newLoadCounters.get(LoadEtlTask.DPP_NORMAL_ALL); if (value != null) { numRowsNormal += Long.valueOf(value); } value = newLoadCounters.get(LoadEtlTask.DPP_ABNORMAL_ALL); if (value != null) { numRowsAbnormal += Long.valueOf(value); } value = newLoadCounters.get(LoadJob.UNSELECTED_ROWS); if (value != null) { numRowsUnselected += Long.valueOf(value); } this.loadCounters.put(LoadEtlTask.DPP_NORMAL_ALL, "" + numRowsNormal); this.loadCounters.put(LoadEtlTask.DPP_ABNORMAL_ALL, "" + numRowsAbnormal); this.loadCounters.put(LoadJob.UNSELECTED_ROWS, "" + numRowsUnselected); } finally { lock.unlock(); } } private void updateCommitInfos(List<TTabletCommitInfo> commitInfos) { lock.lock(); try { this.commitInfos.addAll(commitInfos); } finally { lock.unlock(); } } private void updateStatus(Status status, TUniqueId instanceId) { lock.lock(); try { if (returnedAllResults && status.isCancelled()) { return; } if (status.ok()) { return; } if (!queryStatus.ok()) { return; } queryStatus.setStatus(status); LOG.warn("one instance report fail throw updateStatus(), need cancel. job id: {}, query id: {}, instance id: {}", jobId, DebugUtil.printId(queryId), instanceId != null ? 
DebugUtil.printId(instanceId) : "NaN"); cancelInternal(PPlanFragmentCancelReason.INTERNAL_ERROR); } finally { lock.unlock(); } } public RowBatch getNext() throws Exception { if (receiver == null) { throw new UserException("There is no receiver."); } RowBatch resultBatch; Status status = new Status(); resultBatch = receiver.getNext(status); if (!status.ok()) { LOG.warn("get next fail, need cancel. query id: {}", DebugUtil.printId(queryId)); } updateStatus(status, null /* no instance id */); Status copyStatus = null; lock(); try { copyStatus = new Status(queryStatus); } finally { unlock(); } if (!copyStatus.ok()) { if (Strings.isNullOrEmpty(copyStatus.getErrorMsg())) { copyStatus.rewriteErrorMsg(); } if (copyStatus.isRpcError()) { throw new RpcException("unknown", copyStatus.getErrorMsg()); } else { String errMsg = copyStatus.getErrorMsg(); LOG.warn("query failed: {}", errMsg); int hostIndex = errMsg.indexOf("host"); if (hostIndex != -1) { errMsg = errMsg.substring(0, hostIndex); } throw new UserException(errMsg); } } if (resultBatch.isEos()) { this.returnedAllResults = true; Long numLimitRows = fragments.get(0).getPlanRoot().getLimit(); boolean hasLimit = numLimitRows > 0; if (!isBlockQuery && instanceIds.size() > 1 && hasLimit && numReceivedRows >= numLimitRows) { LOG.debug("no block query, return num >= limit rows, need cancel"); cancelInternal(PPlanFragmentCancelReason.LIMIT_REACH); } } else { numReceivedRows += resultBatch.getBatch().getRowsSize(); } return resultBatch; } public void cancel() { lock(); try { if (!queryStatus.ok()) { return; } else { queryStatus.setStatus(Status.CANCELLED); } LOG.warn("cancel execution of query, this is outside invoke"); cancelInternal(PPlanFragmentCancelReason.USER_CANCEL); } finally { unlock(); } } private void cancelInternal(PPlanFragmentCancelReason cancelReason) { if (null != receiver) { receiver.cancel(); } cancelRemoteFragmentsAsync(cancelReason); if (profileDoneSignal != null) { profileDoneSignal.countDownToZero(new Status()); LOG.info("unfinished instance: {}", profileDoneSignal.getLeftMarks().stream().map(e->DebugUtil.printId(e.getKey())).toArray()); } } private void cancelRemoteFragmentsAsync(PPlanFragmentCancelReason cancelReason) { for (BackendExecState backendExecState : backendExecStates) { backendExecState.cancelFragmentInstance(cancelReason); } } private TNetworkAddress toRpcHost(TNetworkAddress host) throws Exception { Backend backend = Catalog.getCurrentSystemInfo().getBackendWithBePort( host.getHostname(), host.getPort()); if (backend == null) { throw new UserException("there is no scanNode Backend"); } TNetworkAddress dest = new TNetworkAddress(backend.getHost(), backend.getBeRpcPort()); return dest; } private TNetworkAddress toBrpcHost(TNetworkAddress host) throws Exception { Backend backend = Catalog.getCurrentSystemInfo().getBackendWithBePort( host.getHostname(), host.getPort()); if (backend == null) { throw new UserException("there is no scanNode Backend"); } if (backend.getBrpcPort() < 0) { return null; } return new TNetworkAddress(backend.getHost(), backend.getBrpcPort()); } private boolean containsUnionNode(PlanNode node) { if (node instanceof UnionNode) { return true; } for (PlanNode child : node.getChildren()) { if (child instanceof ExchangeNode) { continue; } else if (child instanceof UnionNode) { return true; } else { return containsUnionNode(child); } } return false; } private boolean containsIntersectNode(PlanNode node) { if (node instanceof IntersectNode) { return true; } for (PlanNode child : node.getChildren()) { if 
(child instanceof ExchangeNode) { continue; } else if (child instanceof IntersectNode) { return true; } else { return containsIntersectNode(child); } } return false; } private boolean containsExceptNode(PlanNode node) { if (node instanceof ExceptNode) { return true; } for (PlanNode child : node.getChildren()) { if (child instanceof ExchangeNode) { continue; } else if (child instanceof ExceptNode) { return true; } else { return containsExceptNode(child); } } return false; } private boolean containsSetOperationNode(PlanNode node) { if (node instanceof SetOperationNode) { return true; } for (PlanNode child : node.getChildren()) { if (child instanceof ExchangeNode) { continue; } else if (child instanceof SetOperationNode) { return true; } else { return containsSetOperationNode(child); } } return false; } private void computeFragmentHosts() throws Exception { for (int i = fragments.size() - 1; i >= 0; --i) { PlanFragment fragment = fragments.get(i); FragmentExecParams params = fragmentExecParamsMap.get(fragment.getFragmentId()); if (fragment.getDataPartition() == DataPartition.UNPARTITIONED) { Reference<Long> backendIdRef = new Reference<Long>(); TNetworkAddress execHostport = SimpleScheduler.getHost(this.idToBackend, backendIdRef); if (execHostport == null) { LOG.warn("DataPartition UNPARTITIONED, no scanNode Backend"); throw new UserException("there is no scanNode Backend"); } this.addressToBackendID.put(execHostport, backendIdRef.getRef()); FInstanceExecParam instanceParam = new FInstanceExecParam(null, execHostport, 0, params); params.instanceExecParams.add(instanceParam); continue; } Pair<PlanNode, PlanNode> pairNodes = findLeftmostNode(fragment.getPlanRoot()); PlanNode fatherNode = pairNodes.first; PlanNode leftMostNode = pairNodes.second; /* * Case A: * if the left most is ScanNode, which means there is no child fragment, * we should assign fragment instances on every scan node hosts. * Case B: * if not, there should be exchange nodes to collect all data from child fragments(input fragments), * so we should assign fragment instances corresponding to the child fragments' host */ if (!(leftMostNode instanceof ScanNode)) { int inputFragmentIndex = 0; int maxParallelism = 0; int childrenCount = (fatherNode != null) ? 
fatherNode.getChildren().size() : 1; for (int j = 0; j < childrenCount; j++) { int currentChildFragmentParallelism = fragmentExecParamsMap.get(fragment.getChild(j).getFragmentId()).instanceExecParams.size(); if (currentChildFragmentParallelism > maxParallelism) { maxParallelism = currentChildFragmentParallelism; inputFragmentIndex = j; } } PlanFragmentId inputFragmentId = fragment.getChild(inputFragmentIndex).getFragmentId(); int exchangeInstances = -1; if (ConnectContext.get() != null && ConnectContext.get().getSessionVariable() != null) { exchangeInstances = ConnectContext.get().getSessionVariable().getExchangeInstanceParallel(); } if (exchangeInstances > 0 && fragmentExecParamsMap.get(inputFragmentId).instanceExecParams.size() > exchangeInstances) { Set<TNetworkAddress> hostSet = Sets.newHashSet(); for (FInstanceExecParam execParams: fragmentExecParamsMap.get(inputFragmentId).instanceExecParams) { hostSet.add(execParams.host); } List<TNetworkAddress> hosts = Lists.newArrayList(hostSet); Collections.shuffle(hosts, instanceRandom); for (int index = 0; index < exchangeInstances; index++) { FInstanceExecParam instanceParam = new FInstanceExecParam(null, hosts.get(index % hosts.size()), 0, params); params.instanceExecParams.add(instanceParam); } } else { for (FInstanceExecParam execParams: fragmentExecParamsMap.get(inputFragmentId).instanceExecParams) { FInstanceExecParam instanceParam = new FInstanceExecParam(null, execParams.host, 0, params); params.instanceExecParams.add(instanceParam); } } Collections.shuffle(params.instanceExecParams, instanceRandom); continue; } int parallelExecInstanceNum = fragment.getParallelExecNum(); if ((isColocateJoin(fragment.getPlanRoot()) && fragmentIdToSeqToAddressMap.containsKey(fragment.getFragmentId()) && fragmentIdToSeqToAddressMap.get(fragment.getFragmentId()).size() > 0)) { computeColocateJoinInstanceParam(fragment.getFragmentId(), parallelExecInstanceNum, params); } else if (bucketShuffleJoinController.isBucketShuffleJoin(fragment.getFragmentId().asInt())) { bucketShuffleJoinController.computeInstanceParam(fragment.getFragmentId(), parallelExecInstanceNum, params); } else { Iterator iter = fragmentExecParamsMap.get(fragment.getFragmentId()).scanRangeAssignment.entrySet().iterator(); while (iter.hasNext()) { Map.Entry entry = (Map.Entry) iter.next(); TNetworkAddress key = (TNetworkAddress) entry.getKey(); Map<Integer, List<TScanRangeParams>> value = (Map<Integer, List<TScanRangeParams>>) entry.getValue(); for (Integer planNodeId : value.keySet()) { List<TScanRangeParams> perNodeScanRanges = value.get(planNodeId); int expectedInstanceNum = 1; if (parallelExecInstanceNum > 1) { expectedInstanceNum = Math.min(perNodeScanRanges.size(), parallelExecInstanceNum); } List<List<TScanRangeParams>> perInstanceScanRanges = ListUtil.splitBySize(perNodeScanRanges, expectedInstanceNum); for (List<TScanRangeParams> scanRangeParams : perInstanceScanRanges) { FInstanceExecParam instanceParam = new FInstanceExecParam(null, key, 0, params); instanceParam.perNodeScanRanges.put(planNodeId, scanRangeParams); params.instanceExecParams.add(instanceParam); } } } } if (params.instanceExecParams.isEmpty()) { Reference<Long> backendIdRef = new Reference<Long>(); TNetworkAddress execHostport = SimpleScheduler.getHost(this.idToBackend, backendIdRef); if (execHostport == null) { throw new UserException("there is no scanNode Backend"); } this.addressToBackendID.put(execHostport, backendIdRef.getRef()); FInstanceExecParam instanceParam = new FInstanceExecParam(null, execHostport, 0, 
params); params.instanceExecParams.add(instanceParam); } } } private boolean isColocateJoin(PlanNode node) { if (Config.disable_colocate_join) { return false; } if (ConnectContext.get() != null) { if (ConnectContext.get().getSessionVariable().isDisableColocateJoin()) { return false; } } if (colocateFragmentIds.contains(node.getFragmentId().asInt())) { return true; } if (node instanceof HashJoinNode) { HashJoinNode joinNode = (HashJoinNode) node; if (joinNode.isColocate()) { colocateFragmentIds.add(joinNode.getFragmentId().asInt()); return true; } } for (PlanNode childNode : node.getChildren()) { return isColocateJoin(childNode); } return false; } private Pair<PlanNode, PlanNode> findLeftmostNode(PlanNode plan) { PlanNode newPlan = plan; PlanNode fatherPlan = null; while (newPlan.getChildren().size() != 0 && !(newPlan instanceof ExchangeNode)) { fatherPlan = newPlan; newPlan = newPlan.getChild(0); } return new Pair<PlanNode, PlanNode>(fatherPlan, newPlan); } private <K, V> V findOrInsert(HashMap<K, V> m, final K key, final V defaultVal) { V value = m.get(key); if (value == null) { m.put(key, defaultVal); value = defaultVal; } return value; } private List<TScanRangeParams> findOrInsert(Map<Integer, List<TScanRangeParams>> m, Integer key, ArrayList<TScanRangeParams> defaultVal) { List<TScanRangeParams> value = m.get(key); if (value == null) { m.put(key, defaultVal); value = defaultVal; } return value; } private long getScanRangeLength(final TScanRange scanRange) { return 1; } private void computeColocateJoinInstanceParam(PlanFragmentId fragmentId, int parallelExecInstanceNum, FragmentExecParams params) { Map<Integer, TNetworkAddress> bucketSeqToAddress = fragmentIdToSeqToAddressMap.get(fragmentId); Map<TNetworkAddress, List<Map<Integer, List<TScanRangeParams>>>> addressToScanRanges = Maps.newHashMap(); for (Map.Entry<Integer, Map<Integer, List<TScanRangeParams>>> scanRanges : bucketSeqToScanRange.entrySet()) { TNetworkAddress address = bucketSeqToAddress.get(scanRanges.getKey()); Map<Integer, List<TScanRangeParams>> nodeScanRanges = scanRanges.getValue(); if (!addressToScanRanges.containsKey(address)) { addressToScanRanges.put(address, Lists.newArrayList()); } addressToScanRanges.get(address).add(nodeScanRanges); } for (Map.Entry<TNetworkAddress, List<Map<Integer, List<TScanRangeParams>>>> addressScanRange : addressToScanRanges.entrySet()) { List<Map<Integer, List<TScanRangeParams>>> scanRange = addressScanRange.getValue(); int expectedInstanceNum = 1; if (parallelExecInstanceNum > 1) { expectedInstanceNum = Math.min(scanRange.size(), parallelExecInstanceNum); } List<List<Map<Integer, List<TScanRangeParams>>>> perInstanceScanRanges = ListUtil.splitBySize(scanRange, expectedInstanceNum); for (List<Map<Integer, List<TScanRangeParams>>> perInstanceScanRange : perInstanceScanRanges) { FInstanceExecParam instanceParam = new FInstanceExecParam(null, addressScanRange.getKey(), 0, params); for (Map<Integer, List<TScanRangeParams>> nodeScanRangeMap : perInstanceScanRange) { for (Map.Entry<Integer, List<TScanRangeParams>> nodeScanRange : nodeScanRangeMap.entrySet()) { if (!instanceParam.perNodeScanRanges.containsKey(nodeScanRange.getKey())) { instanceParam.perNodeScanRanges.put(nodeScanRange.getKey(), nodeScanRange.getValue()); } else { instanceParam.perNodeScanRanges.get(nodeScanRange.getKey()).addAll(nodeScanRange.getValue()); } } } params.instanceExecParams.add(instanceParam); } } } private void computeScanRangeAssignment() throws Exception { for (ScanNode scanNode : scanNodes) { 
List<TScanRangeLocations> locations = scanNode.getScanRangeLocations(0); if (locations == null) { continue; } FragmentScanRangeAssignment assignment = fragmentExecParamsMap.get(scanNode.getFragmentId()).scanRangeAssignment; if (isColocateJoin(scanNode.getFragment().getPlanRoot())) { computeScanRangeAssignmentByColocate((OlapScanNode) scanNode, assignment); } else if (bucketShuffleJoinController.isBucketShuffleJoin(scanNode.getFragmentId().asInt(), scanNode.getFragment().getPlanRoot())) { bucketShuffleJoinController.computeScanRangeAssignmentByBucket((OlapScanNode) scanNode, idToBackend, addressToBackendID); } else { computeScanRangeAssignmentByScheduler(scanNode, locations, assignment); } } } private void computeScanRangeAssignmentByColocate( final OlapScanNode scanNode, FragmentScanRangeAssignment assignment) throws Exception { if (!fragmentIdToSeqToAddressMap.containsKey(scanNode.getFragmentId())) { fragmentIdToSeqToAddressMap.put(scanNode.getFragmentId(), new HashedMap()); } Map<Integer, TNetworkAddress> bucketSeqToAddress = fragmentIdToSeqToAddressMap.get(scanNode.getFragmentId()); for(Integer bucketSeq: scanNode.bucketSeq2locations.keySet()) { List<TScanRangeLocations> locations = scanNode.bucketSeq2locations.get(bucketSeq); if (!bucketSeqToAddress.containsKey(bucketSeq)) { getExecHostPortForFragmentIDAndBucketSeq(locations.get(0), scanNode.getFragmentId(), bucketSeq); } for(TScanRangeLocations location: locations) { Map<Integer, List<TScanRangeParams>> scanRanges = findOrInsert(bucketSeqToScanRange, bucketSeq, new HashMap<Integer, List<TScanRangeParams>>()); List<TScanRangeParams> scanRangeParamsList = findOrInsert(scanRanges, scanNode.getId().asInt(), new ArrayList<TScanRangeParams>()); TScanRangeParams scanRangeParams = new TScanRangeParams(); scanRangeParams.scan_range = location.scan_range; scanRangeParamsList.add(scanRangeParams); } } } private void getExecHostPortForFragmentIDAndBucketSeq(TScanRangeLocations seqLocation, PlanFragmentId fragmentId, Integer bucketSeq) throws Exception { int randomLocation = new Random().nextInt(seqLocation.locations.size()); Reference<Long> backendIdRef = new Reference<Long>(); TNetworkAddress execHostPort = SimpleScheduler.getHost(seqLocation.locations.get(randomLocation).backend_id, seqLocation.locations, this.idToBackend, backendIdRef); if (execHostPort == null) { throw new UserException("there is no scanNode Backend"); } this.addressToBackendID.put(execHostPort, backendIdRef.getRef()); this.fragmentIdToSeqToAddressMap.get(fragmentId).put(bucketSeq, execHostPort); } private void computeScanRangeAssignmentByScheduler( final ScanNode scanNode, final List<TScanRangeLocations> locations, FragmentScanRangeAssignment assignment) throws Exception { HashMap<TNetworkAddress, Long> assignedBytesPerHost = Maps.newHashMap(); for (TScanRangeLocations scanRangeLocations : locations) { Long minAssignedBytes = Long.MAX_VALUE; TScanRangeLocation minLocation = null; for (final TScanRangeLocation location : scanRangeLocations.getLocations()) { Long assignedBytes = findOrInsert(assignedBytesPerHost, location.server, 0L); if (assignedBytes < minAssignedBytes) { minAssignedBytes = assignedBytes; minLocation = location; } } Long scanRangeLength = getScanRangeLength(scanRangeLocations.scan_range); assignedBytesPerHost.put(minLocation.server, assignedBytesPerHost.get(minLocation.server) + scanRangeLength); Reference<Long> backendIdRef = new Reference<Long>(); TNetworkAddress execHostPort = SimpleScheduler.getHost(minLocation.backend_id, 
scanRangeLocations.getLocations(), this.idToBackend, backendIdRef); if (execHostPort == null) { throw new UserException("there is no scanNode Backend"); } this.addressToBackendID.put(execHostPort, backendIdRef.getRef()); Map<Integer, List<TScanRangeParams>> scanRanges = findOrInsert(assignment, execHostPort, new HashMap<Integer, List<TScanRangeParams>>()); List<TScanRangeParams> scanRangeParamsList = findOrInsert(scanRanges, scanNode.getId().asInt(), new ArrayList<TScanRangeParams>()); TScanRangeParams scanRangeParams = new TScanRangeParams(); scanRangeParams.scan_range = scanRangeLocations.scan_range; scanRangeParams.setVolumeId(minLocation.volume_id); scanRangeParamsList.add(scanRangeParams); } } public void updateFragmentExecStatus(TReportExecStatusParams params) { if (params.backend_num >= backendExecStates.size()) { LOG.warn("unknown backend number: {}, expected less than: {}", params.backend_num, backendExecStates.size()); return; } BackendExecState execState = backendExecStates.get(params.backend_num); if (!execState.updateProfile(params)) { return; } if (LOG.isDebugEnabled()) { StringBuilder builder = new StringBuilder(); execState.printProfile(builder); LOG.debug("profile for query_id={} instance_id={}\n{}", DebugUtil.printId(queryId), DebugUtil.printId(params.getFragmentInstanceId()), builder.toString()); } Status status = new Status(params.status); if (!(returnedAllResults && status.isCancelled()) && !status.ok()) { LOG.warn("one instance report fail, query_id={} instance_id={}", DebugUtil.printId(queryId), DebugUtil.printId(params.getFragmentInstanceId())); updateStatus(status, params.getFragmentInstanceId()); } if (execState.done) { if (params.isSetDeltaUrls()) { updateDeltas(params.getDeltaUrls()); } if (params.isSetLoadCounters()) { updateLoadCounters(params.getLoadCounters()); } if (params.isSetTrackingUrl()) { trackingUrl = params.getTrackingUrl(); } if (params.isSetExportFiles()) { updateExportFiles(params.getExportFiles()); } if (params.isSetCommitInfos()) { updateCommitInfos(params.getCommitInfos()); } profileDoneSignal.markedCountDown(params.getFragmentInstanceId(), -1L); } if (params.isSetLoadedRows()) { Catalog.getCurrentCatalog().getLoadManager().updateJobPrgress( jobId, params.backend_id, params.query_id, params.fragment_instance_id, params.loaded_rows, params.done); } return; } public void endProfile() { if (backendExecStates.isEmpty()) { return; } if (needReport) { try { profileDoneSignal.await(2, TimeUnit.SECONDS); } catch (InterruptedException e1) { LOG.warn("signal await error", e1); } } for (int i = 1; i < fragmentProfile.size(); ++i) { fragmentProfile.get(i).sortChildren(); } } /* * Waiting the coordinator finish executing. * return false if waiting timeout. * return true otherwise. * NOTICE: return true does not mean that coordinator executed success, * the caller should check queryStatus for result. * * We divide the entire waiting process into multiple rounds, * with a maximum of 30 seconds per round. And after each round of waiting, * check the status of the BE. If the BE status is abnormal, the wait is ended * and the result is returned. Otherwise, continue to the next round of waiting. * This method mainly avoids the problem that the Coordinator waits for a long time * after some BE can no long return the result due to some exception, such as BE is down. 
*/ public boolean join(int timeoutS) { final long fixedMaxWaitTime = 30; long leftTimeoutS = timeoutS; while (leftTimeoutS > 0) { long waitTime = Math.min(leftTimeoutS, fixedMaxWaitTime); boolean awaitRes = false; try { awaitRes = profileDoneSignal.await(waitTime, TimeUnit.SECONDS); } catch (InterruptedException e) { } if (awaitRes) { return true; } if (!checkBackendState()) { return true; } leftTimeoutS -= waitTime; } return false; } /* * Check the state of backends in needCheckBackendExecStates. * return true if all of them are OK. Otherwise, return false. */ private boolean checkBackendState() { for (BackendExecState backendExecState : needCheckBackendExecStates) { if (!backendExecState.isBackendStateHealthy()) { queryStatus = new Status(TStatusCode.INTERNAL_ERROR, "backend " + backendExecState.backend.getId() + " is down"); return false; } } return true; } public boolean isDone() { return profileDoneSignal.getCount() == 0; } class FragmentScanRangeAssignment extends HashMap<TNetworkAddress, Map<Integer, List<TScanRangeParams>>> { } class BucketSeqToScanRange extends HashMap<Integer, Map<Integer, List<TScanRangeParams>>> { } class BucketShuffleJoinController { private Map<PlanFragmentId, BucketSeqToScanRange> fragmentIdBucketSeqToScanRangeMap = Maps.newHashMap(); private Map<PlanFragmentId, Map<Integer, TNetworkAddress>> fragmentIdToSeqToAddressMap = Maps.newHashMap(); private Map<PlanFragmentId, Map<Long, Integer>> fragmentIdToBuckendIdBucketCountMap = Maps.newHashMap(); private Map<PlanFragmentId, Integer> fragmentIdToBucketNumMap = Maps.newHashMap(); private Set<Integer> bucketShuffleFragmentIds = new HashSet<>(); private boolean isBucketShuffleJoin(int fragmentId, PlanNode node) { if (ConnectContext.get() != null) { if (!ConnectContext.get().getSessionVariable().isEnableBucketShuffleJoin()) { return false; } } if (fragmentId != node.getFragmentId().asInt()) { return false; } if (bucketShuffleFragmentIds.contains(fragmentId)) { return true; } if (node instanceof HashJoinNode) { HashJoinNode joinNode = (HashJoinNode) node; if (joinNode.isBucketShuffle()) { bucketShuffleFragmentIds.add(joinNode.getFragmentId().asInt()); return true; } } for (PlanNode childNode : node.getChildren()) { return isBucketShuffleJoin(fragmentId, childNode); } return false; } private boolean isBucketShuffleJoin(int fragmentId) { return bucketShuffleFragmentIds.contains(fragmentId); } private int getFragmentBucketNum(PlanFragmentId fragmentId) { return fragmentIdToBucketNumMap.get(fragmentId); } private void getExecHostPortForFragmentIDAndBucketSeq(TScanRangeLocations seqLocation, PlanFragmentId fragmentId, Integer bucketSeq, ImmutableMap<Long, Backend> idToBackend, Map<TNetworkAddress, Long> addressToBackendID) throws Exception { Map<Long, Integer> buckendIdToBucketCountMap = fragmentIdToBuckendIdBucketCountMap.get(fragmentId); int maxBucketNum = Integer.MAX_VALUE; long buckendId = Long.MAX_VALUE; for (TScanRangeLocation location : seqLocation.locations) { if (buckendIdToBucketCountMap.containsKey(location.backend_id)) { if (buckendIdToBucketCountMap.get(location.backend_id) < maxBucketNum) { maxBucketNum = buckendIdToBucketCountMap.get(location.backend_id); buckendId = location.backend_id; } } else { maxBucketNum = 0; buckendId = location.backend_id; buckendIdToBucketCountMap.put(buckendId, 0); break; } } buckendIdToBucketCountMap.put(buckendId, buckendIdToBucketCountMap.get(buckendId) + 1); Reference<Long> backendIdRef = new Reference<Long>(); TNetworkAddress execHostPort = SimpleScheduler.getHost(buckendId, 
seqLocation.locations, idToBackend, backendIdRef); if (execHostPort == null) { throw new UserException("there is no scanNode Backend"); } addressToBackendID.put(execHostPort, backendIdRef.getRef()); this.fragmentIdToSeqToAddressMap.get(fragmentId).put(bucketSeq, execHostPort); } private void computeScanRangeAssignmentByBucket( final OlapScanNode scanNode, ImmutableMap<Long, Backend> idToBackend, Map<TNetworkAddress, Long> addressToBackendID) throws Exception { if (!fragmentIdToSeqToAddressMap.containsKey(scanNode.getFragmentId())) { fragmentIdToBucketNumMap.put(scanNode.getFragmentId(), scanNode.getOlapTable().getDefaultDistributionInfo().getBucketNum()); fragmentIdToSeqToAddressMap.put(scanNode.getFragmentId(), new HashedMap()); fragmentIdBucketSeqToScanRangeMap.put(scanNode.getFragmentId(), new BucketSeqToScanRange()); fragmentIdToBuckendIdBucketCountMap.put(scanNode.getFragmentId(), new HashMap<>()); } Map<Integer, TNetworkAddress> bucketSeqToAddress = fragmentIdToSeqToAddressMap.get(scanNode.getFragmentId()); BucketSeqToScanRange bucketSeqToScanRange = fragmentIdBucketSeqToScanRangeMap.get(scanNode.getFragmentId()); for(Integer bucketSeq: scanNode.bucketSeq2locations.keySet()) { List<TScanRangeLocations> locations = scanNode.bucketSeq2locations.get(bucketSeq); if (!bucketSeqToAddress.containsKey(bucketSeq)) { getExecHostPortForFragmentIDAndBucketSeq(locations.get(0), scanNode.getFragmentId(), bucketSeq, idToBackend, addressToBackendID); } for(TScanRangeLocations location: locations) { Map<Integer, List<TScanRangeParams>> scanRanges = findOrInsert(bucketSeqToScanRange, bucketSeq, new HashMap<Integer, List<TScanRangeParams>>()); List<TScanRangeParams> scanRangeParamsList = findOrInsert(scanRanges, scanNode.getId().asInt(), new ArrayList<TScanRangeParams>()); TScanRangeParams scanRangeParams = new TScanRangeParams(); scanRangeParams.scan_range = location.scan_range; scanRangeParamsList.add(scanRangeParams); } } } private void computeInstanceParam(PlanFragmentId fragmentId, int parallelExecInstanceNum, FragmentExecParams params) { Map<Integer, TNetworkAddress> bucketSeqToAddress = fragmentIdToSeqToAddressMap.get(fragmentId); BucketSeqToScanRange bucketSeqToScanRange = fragmentIdBucketSeqToScanRangeMap.get(fragmentId); Map<TNetworkAddress, List<Map.Entry<Integer, Map<Integer, List<TScanRangeParams>>>>> addressToScanRanges = Maps.newHashMap(); for (Map.Entry<Integer, Map<Integer, List<TScanRangeParams>>> scanRanges : bucketSeqToScanRange.entrySet()) { TNetworkAddress address = bucketSeqToAddress.get(scanRanges.getKey()); Map<Integer, List<TScanRangeParams>> nodeScanRanges = scanRanges.getValue(); if (!addressToScanRanges.containsKey(address)) { addressToScanRanges.put(address, Lists.newArrayList()); } addressToScanRanges.get(address).add(scanRanges); } for (Map.Entry<TNetworkAddress, List<Map.Entry<Integer, Map<Integer, List<TScanRangeParams>>>>> addressScanRange : addressToScanRanges.entrySet()) { List<Map.Entry<Integer, Map<Integer, List<TScanRangeParams>>>> scanRange = addressScanRange.getValue(); int expectedInstanceNum = 1; if (parallelExecInstanceNum > 1) { expectedInstanceNum = Math.min(scanRange.size(), parallelExecInstanceNum); } List<List<Map.Entry<Integer, Map<Integer, List<TScanRangeParams>>>>> perInstanceScanRanges = ListUtil.splitBySize(scanRange, expectedInstanceNum); for (List<Map.Entry<Integer, Map<Integer, List<TScanRangeParams>>>> perInstanceScanRange : perInstanceScanRanges) { FInstanceExecParam instanceParam = new FInstanceExecParam(null, addressScanRange.getKey(), 0, 
params); for (Map.Entry<Integer, Map<Integer, List<TScanRangeParams>>> nodeScanRangeMap : perInstanceScanRange) { instanceParam.addBucketSeq(nodeScanRangeMap.getKey()); for (Map.Entry<Integer, List<TScanRangeParams>> nodeScanRange : nodeScanRangeMap.getValue().entrySet()) { if (!instanceParam.perNodeScanRanges.containsKey(nodeScanRange.getKey())) { instanceParam.perNodeScanRanges.put(nodeScanRange.getKey(), nodeScanRange.getValue()); } else { instanceParam.perNodeScanRanges.get(nodeScanRange.getKey()).addAll(nodeScanRange.getValue()); } } } params.instanceExecParams.add(instanceParam); } } } } private BucketShuffleJoinController bucketShuffleJoinController = new BucketShuffleJoinController(); private BucketSeqToScanRange bucketSeqToScanRange = new BucketSeqToScanRange(); private Map<PlanFragmentId, Map<Integer, TNetworkAddress>> fragmentIdToSeqToAddressMap = Maps.newHashMap(); private Set<Integer> colocateFragmentIds = new HashSet<>(); public class BackendExecState { TExecPlanFragmentParams rpcParams; PlanFragmentId fragmentId; int instanceId; boolean initiated; boolean done; boolean hasCanceled; int profileFragmentId; RuntimeProfile profile; TNetworkAddress address; Backend backend; long lastMissingHeartbeatTime = -1; public BackendExecState(PlanFragmentId fragmentId, int instanceId, int profileFragmentId, TExecPlanFragmentParams rpcParams, Map<TNetworkAddress, Long> addressToBackendID) { this.profileFragmentId = profileFragmentId; this.fragmentId = fragmentId; this.instanceId = instanceId; this.rpcParams = rpcParams; this.initiated = false; this.done = false; this.address = fragmentExecParamsMap.get(fragmentId).instanceExecParams.get(instanceId).host; this.backend = idToBackend.get(addressToBackendID.get(address)); String name = "Instance " + DebugUtil.printId(fragmentExecParamsMap.get(fragmentId) .instanceExecParams.get(instanceId).instanceId) + " (host=" + address + ")"; this.profile = new RuntimeProfile(name); this.hasCanceled = false; this.lastMissingHeartbeatTime = backend.getLastMissingHeartbeatTime(); } public synchronized boolean updateProfile(TReportExecStatusParams params) { if (this.done) { return false; } if (params.isSetProfile()) { profile.update(params.profile); } this.done = params.done; return true; } public synchronized void printProfile(StringBuilder builder) { this.profile.computeTimeInProfile(); this.profile.prettyPrint(builder, ""); } public synchronized boolean cancelFragmentInstance(PPlanFragmentCancelReason cancelReason) { if (LOG.isDebugEnabled()) { LOG.debug("cancelRemoteFragments initiated={} done={} hasCanceled={} backend: {}, fragment instance id={}, reason: {}", this.initiated, this.done, this.hasCanceled, backend.getId(), DebugUtil.printId(fragmentInstanceId()), cancelReason.name()); } try { if (!this.initiated) { return false; } if (this.done) { return false; } if (this.hasCanceled) { return false; } TNetworkAddress brpcAddress = toBrpcHost(address); try { BackendServiceProxy.getInstance().cancelPlanFragmentAsync(brpcAddress, fragmentInstanceId(), cancelReason); } catch (RpcException e) { LOG.warn("cancel plan fragment get a exception, address={}:{}", brpcAddress.getHostname(), brpcAddress.getPort()); SimpleScheduler.addToBlacklist(addressToBackendID.get(brpcAddress)); } this.hasCanceled = true; } catch (Exception e) { LOG.warn("catch a exception", e); return false; } return true; } public synchronized boolean computeTimeInProfile(int maxFragmentId) { if (this.profileFragmentId < 0 || this.profileFragmentId > maxFragmentId) { LOG.warn("profileFragmentId 
{} should be in [0, {})", profileFragmentId, maxFragmentId); return false; } profile.computeTimeInProfile(); return true; } public boolean isBackendStateHealthy() { if (backend.getLastMissingHeartbeatTime() > lastMissingHeartbeatTime) { LOG.warn("backend {} is down while joining the coordinator. job id: {}", backend.getId(), jobId); return false; } return true; } public Future<PExecPlanFragmentResult> execRemoteFragmentAsync() throws TException, RpcException { TNetworkAddress brpcAddress = null; try { brpcAddress = new TNetworkAddress(backend.getHost(), backend.getBrpcPort()); } catch (Exception e) { throw new TException(e.getMessage()); } this.initiated = true; try { return BackendServiceProxy.getInstance().execPlanFragmentAsync(brpcAddress, rpcParams); } catch (RpcException e) { return new Future<PExecPlanFragmentResult>() { @Override public boolean cancel(boolean mayInterruptIfRunning) { return false; } @Override public boolean isCancelled() { return false; } @Override public boolean isDone() { return true; } @Override public PExecPlanFragmentResult get() { PExecPlanFragmentResult result = new PExecPlanFragmentResult(); PStatus pStatus = new PStatus(); pStatus.error_msgs = Lists.newArrayList(); pStatus.error_msgs.add(e.getMessage()); pStatus.status_code = TStatusCode.THRIFT_RPC_ERROR.getValue(); result.status = pStatus; return result; } @Override public PExecPlanFragmentResult get(long timeout, TimeUnit unit) { return get(); } }; } } public FragmentInstanceInfo buildFragmentInstanceInfo() { return new QueryStatisticsItem.FragmentInstanceInfo.Builder() .instanceId(fragmentInstanceId()).fragmentId(String.valueOf(fragmentId)).address(this.address) .build(); } private TUniqueId fragmentInstanceId() { return this.rpcParams.params.getFragmentInstanceId(); } } protected class FragmentExecParams { public PlanFragment fragment; public List<TPlanFragmentDestination> destinations = Lists.newArrayList(); public Map<Integer, Integer> perExchNumSenders = Maps.newHashMap(); public List<PlanFragmentId> inputFragments = Lists.newArrayList(); public List<FInstanceExecParam> instanceExecParams = Lists.newArrayList(); public FragmentScanRangeAssignment scanRangeAssignment = new FragmentScanRangeAssignment(); public FragmentExecParams(PlanFragment fragment) { this.fragment = fragment; } List<TExecPlanFragmentParams> toThrift(int backendNum) { List<TExecPlanFragmentParams> paramsList = Lists.newArrayList(); for (int i = 0; i < instanceExecParams.size(); ++i) { final FInstanceExecParam instanceExecParam = instanceExecParams.get(i); TExecPlanFragmentParams params = new TExecPlanFragmentParams(); params.setProtocolVersion(PaloInternalServiceVersion.V1); params.setFragment(fragment.toThrift()); params.setDescTbl(descTable); params.setParams(new TPlanFragmentExecParams()); params.setResourceInfo(tResourceInfo); params.params.setQueryId(queryId); params.params.setFragmentInstanceId(instanceExecParam.instanceId); Map<Integer, List<TScanRangeParams>> scanRanges = instanceExecParam.perNodeScanRanges; if (scanRanges == null) { scanRanges = Maps.newHashMap(); } params.params.setPerNodeScanRanges(scanRanges); params.params.setPerExchNumSenders(perExchNumSenders); params.params.setDestinations(destinations); params.params.setSenderId(i); params.params.setNumSenders(instanceExecParams.size()); params.setCoord(coordAddress); params.setBackendNum(backendNum++); params.setQueryGlobals(queryGlobals); params.setQueryOptions(queryOptions); params.params.setSendQueryStatisticsWithEveryBatch( 
fragment.isTransferQueryStatisticsWithEveryBatch()); if (queryOptions.getQueryType() == TQueryType.LOAD) { LoadErrorHub.Param param = Catalog.getCurrentCatalog().getLoadInstance().getLoadErrorHubInfo(); if (param != null) { TLoadErrorHubInfo info = param.toThrift(); if (info != null) { params.setLoadErrorHubInfo(info); } } } paramsList.add(params); } return paramsList; } public void appendScanRange(StringBuilder sb, List<TScanRangeParams> params) { sb.append("range=["); int idx = 0; for (TScanRangeParams range : params) { TPaloScanRange paloScanRange = range.getScanRange().getPaloScanRange(); if (paloScanRange != null) { if (idx++ != 0) { sb.append(","); } sb.append("{tid=").append(paloScanRange.getTabletId()) .append(",ver=").append(paloScanRange.getVersion()).append("}"); } TEsScanRange esScanRange = range.getScanRange().getEsScanRange(); if (esScanRange != null) { sb.append("{ index=").append(esScanRange.getIndex()) .append(", shardid=").append(esScanRange.getShardId()) .append("}"); } } sb.append("]"); } public void appendTo(StringBuilder sb) { sb.append("{plan="); fragment.getPlanRoot().appendTrace(sb); sb.append(",instance=["); for (int i = 0; i < instanceExecParams.size(); ++i) { if (i != 0) { sb.append(","); } TNetworkAddress address = instanceExecParams.get(i).host; Map<Integer, List<TScanRangeParams>> scanRanges = scanRangeAssignment.get(address); sb.append("{"); sb.append("id=").append(DebugUtil.printId(instanceExecParams.get(i).instanceId)); sb.append(",host=").append(instanceExecParams.get(i).host); if (scanRanges == null) { sb.append("}"); continue; } sb.append(",range=["); int eIdx = 0; for (Map.Entry<Integer, List<TScanRangeParams>> entry : scanRanges.entrySet()) { if (eIdx++ != 0) { sb.append(","); } sb.append("id").append(entry.getKey()).append(","); appendScanRange(sb, entry.getValue()); } sb.append("]"); sb.append("}"); } sb.append("]"); sb.append("}"); } }
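A note on the scheduler logic in the Coordinator context above: `computeScanRangeAssignmentByScheduler` assigns each scan range greedily to the candidate replica host that has received the fewest bytes so far, with every range weighted as length 1 by `getScanRangeLength`. A minimal, self-contained sketch of that strategy, using hypothetical host names and plain collections instead of the Thrift location types:

```java
import java.util.*;

// Greedy scan-range assignment: each range goes to the candidate host with the
// fewest "bytes" assigned so far (every range counts as length 1 here, matching
// getScanRangeLength() in the code above).
public class GreedyAssignmentSketch {
    public static void main(String[] args) {
        // Hypothetical input: each scan range lists its candidate replica hosts.
        List<List<String>> rangeCandidates = List.of(
                List.of("be1", "be2"),
                List.of("be1", "be3"),
                List.of("be1", "be2", "be3"));

        Map<String, Long> assignedBytesPerHost = new HashMap<>();
        Map<Integer, String> assignment = new HashMap<>();
        for (int rangeId = 0; rangeId < rangeCandidates.size(); rangeId++) {
            String minHost = null;
            long minBytes = Long.MAX_VALUE;
            for (String host : rangeCandidates.get(rangeId)) {
                long bytes = assignedBytesPerHost.getOrDefault(host, 0L);
                if (bytes < minBytes) {
                    minBytes = bytes;
                    minHost = host;
                }
            }
            assignedBytesPerHost.merge(minHost, 1L, Long::sum); // range length fixed to 1
            assignment.put(rangeId, minHost);
        }
        System.out.println(assignment); // e.g. {0=be1, 1=be3, 2=be2}
    }
}
```

This heuristic balances the number of ranges per host without needing real size statistics, which is presumably why `getScanRangeLength` simply returns 1.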
Currently, the shift operation is not supported for tuples by the runtime: the runtime checks whether the value performing the shift is an array and, if it is not, throws an `OperationNotSupported` exception. The changes proposed in this PR allow the shift operation to be used on tuples, but this also introduces the risk of mutations that violate the inherent type of the tuple (as shown in the previous comment). The check in question is added to make sure that never happens.
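To make the risk concrete, here is a minimal, self-contained sketch (not the actual Ballerina runtime API; `memberTypes` and `values` are illustrative names): for a tuple of inherent type `[int, string]`, shifting out the first member would slide the `string` member into the slot that must hold an `int`, which is exactly the mutation the validation guards against.

```java
import java.util.List;

// Sketch: why shift() on a tuple can violate its inherent type.
// Tuple [int, string] — slot 0 must hold an Integer, slot 1 a String.
public class TupleShiftViolationSketch {
    public static void main(String[] args) {
        List<Class<?>> memberTypes = List.of(Integer.class, String.class);
        Object[] values = {1, "hello"};

        // shift(0) would move values[i] into slot i - 1; check each move up front.
        for (int i = 1; i < values.length; i++) {
            Class<?> expected = memberTypes.get(i - 1);
            if (!expected.isInstance(values[i])) {
                throw new IllegalStateException("shift would place a "
                        + values[i].getClass().getSimpleName() + " into a slot of type "
                        + expected.getSimpleName());
            }
        }
    }
}
```

Running the sketch throws, because the `String` member would land in the `Integer` slot — the same condition the validation reports as an inherent type violation.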
private void validateTupleSizeAndInherentType() { int numOfMandatoryTypes = this.tupleType.getTupleTypes().size(); if (numOfMandatoryTypes >= this.getLength()) { throw ErrorHelper.getRuntimeException( getModulePrefixedReason(ARRAY_LANG_LIB, SIZE_MISMATCH_ERROR_IDENTIFIER), ErrorCodes.INVALID_MEMBER_SIZE, numOfMandatoryTypes + 1, this.getLength()); } for (int i = 1; i <= numOfMandatoryTypes; i++) { if (!TypeChecker.checkIsType(this.getRefValue(i), this.tupleType.getTupleTypes().get(i - 1))) { throw ErrorHelper.getRuntimeException( getModulePrefixedReason(ARRAY_LANG_LIB, INHERENT_TYPE_VIOLATION_ERROR_IDENTIFIER), ErrorCodes.INCOMPATIBLE_TYPE, this.tupleType.getTupleTypes().get(i - 1), (i == numOfMandatoryTypes) ? this.tupleType.getRestType() : this.tupleType.getTupleTypes().get(i)); } } }
this.tupleType.getRestType() : this.tupleType.getTupleTypes().get(i));
private void validateTupleSizeAndInherentType() { List<Type> tupleTypesList = this.tupleType.getTupleTypes(); int numOfMandatoryTypes = tupleTypesList.size(); if (numOfMandatoryTypes >= this.getLength()) { throw ErrorHelper.getRuntimeException(getModulePrefixedReason(ARRAY_LANG_LIB, OPERATION_NOT_SUPPORTED_IDENTIFIER), ErrorCodes.INVALID_TUPLE_MEMBER_SIZE, "shift"); } for (int i = 1; i <= numOfMandatoryTypes; i++) { if (!TypeChecker.checkIsType(this.getRefValue(i), tupleTypesList.get(i - 1))) { throw ErrorHelper.getRuntimeException(getModulePrefixedReason(ARRAY_LANG_LIB, INHERENT_TYPE_VIOLATION_ERROR_IDENTIFIER), ErrorCodes.INCOMPATIBLE_TYPE, tupleTypesList.get(i - 1), (i == numOfMandatoryTypes) ? this.tupleType.getRestType() : tupleTypesList.get(i)); } } }
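A quick illustration of the size precondition in the corrected method (illustrative numbers, not the runtime API): a tuple value whose length does not exceed its mandatory member count has nothing in its rest portion to backfill the removed slot, so the shift is rejected outright instead of risking an inherent-type violation.

```java
// Sketch of the size check in validateTupleSizeAndInherentType(): for a tuple
// type [int, string] (two mandatory members) holding exactly two values,
// shift() cannot be backfilled from a rest portion, so it is rejected.
public class TupleShiftSizeSketch {
    public static void main(String[] args) {
        int numOfMandatoryTypes = 2; // members of [int, string]
        int length = 2;              // number of stored values
        if (numOfMandatoryTypes >= length) {
            // mirrors the OPERATION_NOT_SUPPORTED error in the corrected method
            throw new UnsupportedOperationException("shift() is not supported on this tuple");
        }
    }
}
```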
class TupleValueImpl extends AbstractArrayValue { protected TupleType tupleType; protected Type type; Object[] refValues; private final int minSize; private final boolean hasRestElement; private BTypedesc typedesc; private TypedescValueImpl inherentType; public TupleValueImpl(Object[] values, TupleType type) { this.refValues = values; this.type = this.tupleType = type; this.hasRestElement = this.tupleType.getRestType() != null; List<Type> memTypes = type.getTupleTypes(); int memCount = memTypes.size(); if (values.length < memCount) { this.refValues = Arrays.copyOf(refValues, memCount); for (int i = values.length; i < memCount; i++) { refValues[i] = memTypes.get(i).getZeroValue(); } } this.minSize = memTypes.size(); this.size = refValues.length; } public TupleValueImpl(TupleType type) { this.type = this.tupleType = type; List<Type> memTypes = this.tupleType.getTupleTypes(); int memTypeCount = memTypes.size(); this.minSize = this.size = memTypeCount; this.hasRestElement = this.tupleType.getRestType() != null; if (type.getRestType() == null) { this.maxSize = this.size; this.refValues = new Object[this.size]; } else { this.refValues = new Object[DEFAULT_ARRAY_SIZE]; } for (int i = 0; i < memTypeCount; i++) { Type memType = memTypes.get(i); if (!TypeChecker.hasFillerValue(memType)) { continue; } this.refValues[i] = memType.getZeroValue(); } } public TupleValueImpl(TupleType type, long size, BListInitialValueEntry[] initialValues) { this(type, initialValues); } public TupleValueImpl(Type type, BListInitialValueEntry[] initialValues) { this.type = type; this.tupleType = (TupleType) TypeUtils.getImpliedType(type); List<Type> memTypes = this.tupleType.getTupleTypes(); int memCount = memTypes.size(); if (tupleType.getRestType() != null) { int valueCount = 0; for (BListInitialValueEntry listEntry : initialValues) { if (listEntry instanceof ListInitialValueEntry.ExpressionEntry) { valueCount++; } else { BArray values = ((ListInitialValueEntry.SpreadEntry) listEntry).values; valueCount += values.size(); } } this.size = Math.max(valueCount, memCount); } else { this.size = memCount; } this.minSize = memCount; this.hasRestElement = this.tupleType.getRestType() != null; if (tupleType.getRestType() == null) { this.maxSize = this.size; this.refValues = new Object[this.size]; } else { this.refValues = new Object[DEFAULT_ARRAY_SIZE]; } int index = 0; for (BListInitialValueEntry listEntry : initialValues) { if (listEntry instanceof ListInitialValueEntry.ExpressionEntry expressionEntry) { addRefValue(index++, expressionEntry.value); } else { BArray values = ((ListInitialValueEntry.SpreadEntry) listEntry).values; BIterator<?> iterator = values.getIterator(); while (iterator.hasNext()) { addRefValue(index++, iterator.next()); } } } if (index >= memCount) { return; } for (int i = index; i < memCount; i++) { Type memType = memTypes.get(i); if (!TypeChecker.hasFillerValue(memType)) { continue; } this.refValues[i] = memType.getZeroValue(); } } public TupleValueImpl(Type type, BListInitialValueEntry[] initialValues, TypedescValueImpl inherentType) { this(type, initialValues); this.inherentType = inherentType; } @Override public BTypedesc getTypedesc() { if (this.typedesc == null) { if (inherentType != null) { this.typedesc = getTypedescValue(type.isReadOnly(), this, inherentType); } else { this.typedesc = getTypedescValue(type, this); } } return typedesc; } /** * Get value in the given array index. 
* * @param index array index * @return array value */ @Override public Object get(long index) { rangeCheckForGet(index, this.size); return this.refValues[(int) index]; } /** * Get ref value in the given index. * * @param index array index * @return array value */ @Override public Object getRefValue(long index) { return get(index); } @Override public Object fillAndGetRefValue(long index) { if (index >= this.size && this.hasRestElement) { handleImmutableArrayValue(); fillRead(index, refValues.length); return this.refValues[(int) index]; } return get(index); } /** * Get int value in the given index. * * @param index array index * @return array element */ @Override public long getInt(long index) { return (Long) get(index); } /** * Get boolean value in the given index. * * @param index array index * @return array element */ public boolean getBoolean(long index) { return (Boolean) get(index); } /** * Get byte value in the given index. * * @param index array index * @return array element */ @Override public byte getByte(long index) { Object value = get(index); if (value instanceof Long) { return ((Long) value).byteValue(); } return (Byte) value; } /** * Get float value in the given index. * * @param index array index * @return array element */ @Override public double getFloat(long index) { return (Double) get(index); } /** * Get string value in the given index. * * @param index array index * @return array element */ @Override public String getString(long index) { return get(index).toString(); } /** * Get string value in the given index. * * @param index array index * @return array element */ @Override public BString getBString(long index) { return (BString) get(index); } /** * Add ref value to the given array index. * * @param index array index * @param value value to be added */ @Override public void add(long index, Object value) { handleImmutableArrayValue(); addRefValue(index, value); } public void addRefValue(long index, Object value) { prepareForAdd(index, value, refValues.length); refValues[(int) index] = value; } public void convertStringAndAddRefValue(long index, BString value) { rangeCheck(index, size); int intIndex = (int) index; Type elemType; if (index >= this.minSize) { elemType = this.tupleType.getRestType(); } else { elemType = this.tupleType.getTupleTypes().get(intIndex); } Object val = ValueConverter.getConvertedStringValue(value, elemType); prepareForAddWithoutTypeCheck(refValues.length, intIndex); refValues[intIndex] = val; } public void addRefValueForcefully(int index, Object value) { prepareForAddForcefully(index, refValues.length); refValues[index] = value; } /** * Add int value to the given array index. * * @param index array index * @param value value to be added */ @Override public void add(long index, long value) { add(index, Long.valueOf(value)); } /** * Add boolean value to the given array index. * * @param index array index * @param value value to be added */ @Override public void add(long index, boolean value) { add(index, Boolean.valueOf(value)); } /** * Add byte value to the given array index. * * @param index array index * @param value value to be added */ @Override public void add(long index, byte value) { add(index, Byte.valueOf(value)); } /** * Add double value to the given array index. * * @param index array index * @param value value to be added */ @Override public void add(long index, double value) { add(index, Double.valueOf(value)); } /** * Add string value to the given array index. 
* * @param index array index * @param value value to be added */ @Override public void add(long index, String value) { add(index, (Object) value); } /** * Add string value to the given array index. * * @param index array index * @param value value to be added */ @Override public void add(long index, BString value) { add(index, (Object) value); } /** * Append value to the existing array. * * @param value value to be appended */ @Override public void append(Object value) { add(size, value); } @Override public Object shift(long index) { handleImmutableArrayValue(); validateTupleSizeAndInherentType(); Object val = get(index); shiftArray((int) index); return val; } /** * Removes and returns first member of an array. * * @return the value that was the first member of the array */ @Override public Object shift() { return shift(0); } @Override public void unshift(Object[] values) { unshift(0, values); } @Override public String stringValue(BLink parent) { StringJoiner sj = new StringJoiner(","); for (int i = 0; i < this.size; i++) { Object value = this.refValues[i]; Type type = TypeChecker.getType(value); CycleUtils.Node parentNode = new CycleUtils.Node(this, parent); switch (type.getTag()) { case TypeTags.STRING_TAG: case TypeTags.XML_TAG: case TypeTags.XML_ELEMENT_TAG: case TypeTags.XML_ATTRIBUTES_TAG: case TypeTags.XML_COMMENT_TAG: case TypeTags.XML_PI_TAG: case TypeTags.XMLNS_TAG: case TypeTags.XML_TEXT_TAG: sj.add(((BValue) value).informalStringValue(parentNode)); break; case TypeTags.NULL_TAG: sj.add("null"); break; default: sj.add(getStringVal(value, new CycleUtils.Node(this, parentNode))); break; } } return "[" + sj + "]"; } @Override public String expressionStringValue(BLink parent) { StringJoiner sj = new StringJoiner(","); for (int i = 0; i < this.size; i++) { sj.add(getExpressionStringVal(this.refValues[i], new CycleUtils.Node(this, parent))); } return "[" + sj + "]"; } @Override public Type getType() { return this.type; } @Override public int size() { return this.size; } @Override public boolean isEmpty() { return this.size == 0; } @Override public BArray slice(long startIndex, long endIndex) { return null; } @Override public Object copy(Map<Object, Object> refs) { if (isFrozen()) { return this; } if (refs.containsKey(this)) { return refs.get(this); } Object[] values = new Object[this.size]; TupleValueImpl refValueArray = new TupleValueImpl(values, this.tupleType); refs.put(this, refValueArray); IntStream.range(0, this.size).forEach(i -> { Object value = this.refValues[i]; if (value instanceof BRefValue) { values[i] = ((BRefValue) value).copy(refs); } else { values[i] = value; } }); return refValueArray; } /** * Get ref values array. * * @return ref value array */ @Override public Object[] getValues() { return refValues; } /** * Get a copy of byte array. * * @return byte array */ @Override public byte[] getBytes() { throw new UnsupportedOperationException(); } /** * Get a copy of string array. * * @return string array */ @Override public String[] getStringArray() { throw new UnsupportedOperationException(); } /** * Get a copy of int array. 
* * @return int array */ @Override public long[] getIntArray() { throw new UnsupportedOperationException(); } @Override public boolean[] getBooleanArray() { throw new UnsupportedOperationException(); } @Override public byte[] getByteArray() { throw new UnsupportedOperationException(); } @Override public double[] getFloatArray() { throw new UnsupportedOperationException(); } @Override public void serialize(OutputStream outputStream) { try { outputStream.write(this.toString().getBytes(Charset.defaultCharset())); } catch (IOException e) { throw ErrorCreator.createError(StringUtils.fromString("error occurred while serializing data"), e); } } /** * {@inheritDoc} */ @Override public void freezeDirect() { if (tupleType.isReadOnly()) { return; } this.type = ReadOnlyUtils.setImmutableTypeAndGetEffectiveType(this.type); this.tupleType = (TupleType) TypeUtils.getImpliedType(type); for (int i = 0; i < this.size; i++) { Object value = this.get(i); if (value instanceof BRefValue) { ((BRefValue) value).freezeDirect(); } } this.typedesc = null; } /** * {@inheritDoc} */ @Override public IteratorValue getIterator() { return new ArrayIterator(this); } /** * Get {@code BType} of the array elements. * * @return element type */ @Override public Type getElementType() { throw new UnsupportedOperationException(); } @Override protected void resizeInternalArray(int newLength) { refValues = Arrays.copyOf(refValues, newLength); } @Override protected void fillValues(int index) { if (index <= size) { return; } Type restType = this.tupleType.getRestType(); if (restType != null) { for (int i = size; i < index; i++) { this.refValues[i] = restType.getZeroValue(); } } } @Override protected void rangeCheckForGet(long index, int size) { rangeCheck(index, size); if (index < 0 || index >= size) { throw ErrorHelper.getRuntimeException( getModulePrefixedReason(ARRAY_LANG_LIB, INDEX_OUT_OF_RANGE_ERROR_IDENTIFIER), ErrorCodes.TUPLE_INDEX_OUT_OF_RANGE, index, size); } } @Override protected void rangeCheck(long index, int size) { if (index > Integer.MAX_VALUE || index < Integer.MIN_VALUE) { throw ErrorHelper.getRuntimeException( getModulePrefixedReason(ARRAY_LANG_LIB, INDEX_OUT_OF_RANGE_ERROR_IDENTIFIER), ErrorCodes.INDEX_NUMBER_TOO_LARGE, index); } if ((this.tupleType.getRestType() == null && index >= this.maxSize) || (int) index < 0) { throw ErrorHelper.getRuntimeException( getModulePrefixedReason(ARRAY_LANG_LIB, INDEX_OUT_OF_RANGE_ERROR_IDENTIFIER), ErrorCodes.TUPLE_INDEX_OUT_OF_RANGE, index, size); } } @Override protected void fillerValueCheck(int index, int size, int expectedLength) { if (this.size >= index) { return; } if (!TypeChecker.hasFillerValue(this.tupleType.getRestType()) && (index > size)) { throw ErrorHelper.getRuntimeException(ErrorReasons.ILLEGAL_LIST_INSERTION_ERROR, ErrorCodes.ILLEGAL_TUPLE_INSERTION, size, expectedLength); } } /** * Same as {@code prepareForAdd}, except fillerValueCheck is not performed as we are guaranteed to add * elements to consecutive positions. 
* * @param index last index after add operation completes * @param currentArraySize current array size */ @Override protected void prepareForConsecutiveMultiAdd(long index, int currentArraySize) { int intIndex = (int) index; rangeCheck(index, size); ensureCapacity(intIndex + 1, currentArraySize); resetSize(intIndex); } @Override protected void ensureCapacity(int requestedCapacity, int currentArraySize) { if (requestedCapacity <= currentArraySize) { return; } int newArraySize = currentArraySize + (currentArraySize >> 1); newArraySize = Math.max(newArraySize, requestedCapacity); newArraySize = Math.min(newArraySize, this.maxSize); resizeInternalArray(newArraySize); } @Override protected void checkFixedLength(long length) { if (this.tupleType.getRestType() == null) { throw ErrorHelper.getRuntimeException( getModulePrefixedReason(ARRAY_LANG_LIB, INHERENT_TYPE_VIOLATION_ERROR_IDENTIFIER), ErrorCodes.ILLEGAL_TUPLE_SIZE, size, length); } else if (this.tupleType.getTupleTypes().size() > length) { throw ErrorHelper.getRuntimeException( getModulePrefixedReason(ARRAY_LANG_LIB, INHERENT_TYPE_VIOLATION_ERROR_IDENTIFIER), ErrorCodes.ILLEGAL_TUPLE_WITH_REST_TYPE_SIZE, this.tupleType.getTupleTypes().size(), length); } } @Override protected void unshift(long index, Object[] vals) { handleImmutableArrayValue(); unshiftArray(index, vals.length, getCurrentArrayLength()); addToRefArray(vals, (int) index); } private void prepareForAdd(long index, Object value, int currentArraySize) { int intIndex = (int) index; rangeCheck(index, size); Type elemType; if (index >= this.minSize) { elemType = this.tupleType.getRestType(); } else { elemType = this.tupleType.getTupleTypes().get((int) index); } if (!TypeChecker.checkIsType(value, elemType)) { throw ErrorCreator.createError( getModulePrefixedReason(ARRAY_LANG_LIB, INHERENT_TYPE_VIOLATION_ERROR_IDENTIFIER), ErrorHelper.getErrorDetails(ErrorCodes.INCOMPATIBLE_TYPE, elemType, TypeChecker.getType(value))); } prepareForAddWithoutTypeCheck(currentArraySize, intIndex); } private void prepareForAddWithoutTypeCheck(int currentArraySize, int intIndex) { fillerValueCheck(intIndex, size, intIndex + 1); ensureCapacity(intIndex + 1, currentArraySize); fillValues(intIndex); resetSize(intIndex); } private void fillRead(long index, int currentArraySize) { Type restType = this.tupleType.getRestType(); if (!TypeChecker.hasFillerValue(restType)) { throw ErrorHelper.getRuntimeException(ErrorReasons.ILLEGAL_LIST_INSERTION_ERROR, ErrorCodes.ILLEGAL_TUPLE_INSERTION, size, index + 1); } int intIndex = (int) index; rangeCheck(index, size); ensureCapacity(intIndex + 1, currentArraySize); for (int i = size; i <= index; i++) { this.refValues[i] = restType.getZeroValue(); } resetSize(intIndex); } private void shiftArray(int index) { int nElemsToBeMoved = this.size - 1 - index; if (nElemsToBeMoved >= 0) { System.arraycopy(this.refValues, index + 1, this.refValues, index, nElemsToBeMoved); } this.size--; } private void addToRefArray(Object[] vals, int startIndex) { int endIndex = startIndex + vals.length; for (int i = startIndex, j = 0; i < endIndex; i++, j++) { add(i, vals[j]); } } private void unshiftArray(long index, int unshiftByN, int arrLength) { int lastIndex = size() + unshiftByN - 1; prepareForConsecutiveMultiAdd(lastIndex, arrLength); if (index > lastIndex) { throw ErrorHelper.getRuntimeException( getModulePrefixedReason(ARRAY_LANG_LIB, INDEX_OUT_OF_RANGE_ERROR_IDENTIFIER), ErrorCodes.INDEX_NUMBER_TOO_LARGE, index); } int i = (int) index; System.arraycopy(this.refValues, i, this.refValues, 
i + unshiftByN, this.size - i); } private int getCurrentArrayLength() { return this.refValues.length; } private void resetSize(int index) { if (index >= size) { size = index + 1; } } }
class TupleValueImpl extends AbstractArrayValue { protected TupleType tupleType; protected Type type; Object[] refValues; private final int minSize; private final boolean hasRestElement; private BTypedesc typedesc; private TypedescValueImpl inherentType; public TupleValueImpl(Object[] values, TupleType type) { this.refValues = values; this.type = this.tupleType = type; this.hasRestElement = this.tupleType.getRestType() != null; List<Type> memTypes = type.getTupleTypes(); int memCount = memTypes.size(); if (values.length < memCount) { this.refValues = Arrays.copyOf(refValues, memCount); for (int i = values.length; i < memCount; i++) { refValues[i] = memTypes.get(i).getZeroValue(); } } this.minSize = memTypes.size(); this.size = refValues.length; } public TupleValueImpl(TupleType type) { this.type = this.tupleType = type; List<Type> memTypes = this.tupleType.getTupleTypes(); int memTypeCount = memTypes.size(); this.minSize = this.size = memTypeCount; this.hasRestElement = this.tupleType.getRestType() != null; if (type.getRestType() == null) { this.maxSize = this.size; this.refValues = new Object[this.size]; } else { this.refValues = new Object[DEFAULT_ARRAY_SIZE]; } for (int i = 0; i < memTypeCount; i++) { Type memType = memTypes.get(i); if (!TypeChecker.hasFillerValue(memType)) { continue; } this.refValues[i] = memType.getZeroValue(); } } public TupleValueImpl(TupleType type, long size, BListInitialValueEntry[] initialValues) { this(type, initialValues); } public TupleValueImpl(Type type, BListInitialValueEntry[] initialValues) { this.type = type; this.tupleType = (TupleType) TypeUtils.getImpliedType(type); List<Type> memTypes = this.tupleType.getTupleTypes(); int memCount = memTypes.size(); if (tupleType.getRestType() != null) { int valueCount = 0; for (BListInitialValueEntry listEntry : initialValues) { if (listEntry instanceof ListInitialValueEntry.ExpressionEntry) { valueCount++; } else { BArray values = ((ListInitialValueEntry.SpreadEntry) listEntry).values; valueCount += values.size(); } } this.size = Math.max(valueCount, memCount); } else { this.size = memCount; } this.minSize = memCount; this.hasRestElement = this.tupleType.getRestType() != null; if (tupleType.getRestType() == null) { this.maxSize = this.size; this.refValues = new Object[this.size]; } else { this.refValues = new Object[DEFAULT_ARRAY_SIZE]; } int index = 0; for (BListInitialValueEntry listEntry : initialValues) { if (listEntry instanceof ListInitialValueEntry.ExpressionEntry expressionEntry) { addRefValue(index++, expressionEntry.value); } else { BArray values = ((ListInitialValueEntry.SpreadEntry) listEntry).values; BIterator<?> iterator = values.getIterator(); while (iterator.hasNext()) { addRefValue(index++, iterator.next()); } } } if (index >= memCount) { return; } for (int i = index; i < memCount; i++) { Type memType = memTypes.get(i); if (!TypeChecker.hasFillerValue(memType)) { continue; } this.refValues[i] = memType.getZeroValue(); } } public TupleValueImpl(Type type, BListInitialValueEntry[] initialValues, TypedescValueImpl inherentType) { this(type, initialValues); this.inherentType = inherentType; } @Override public BTypedesc getTypedesc() { if (this.typedesc == null) { if (inherentType != null) { this.typedesc = getTypedescValue(type.isReadOnly(), this, inherentType); } else { this.typedesc = getTypedescValue(type, this); } } return typedesc; } /** * Get value in the given array index. 
* * @param index array index * @return array value */ @Override public Object get(long index) { rangeCheckForGet(index, this.size); return this.refValues[(int) index]; } /** * Get ref value in the given index. * * @param index array index * @return array value */ @Override public Object getRefValue(long index) { return get(index); } @Override public Object fillAndGetRefValue(long index) { if (index >= this.size && this.hasRestElement) { handleImmutableArrayValue(); fillRead(index, refValues.length); return this.refValues[(int) index]; } return get(index); } /** * Get int value in the given index. * * @param index array index * @return array element */ @Override public long getInt(long index) { return (Long) get(index); } /** * Get boolean value in the given index. * * @param index array index * @return array element */ @Override public boolean getBoolean(long index) { return (Boolean) get(index); } /** * Get byte value in the given index. * * @param index array index * @return array element */ @Override public byte getByte(long index) { Object value = get(index); if (value instanceof Long) { return ((Long) value).byteValue(); } return (Byte) value; } /** * Get float value in the given index. * * @param index array index * @return array element */ @Override public double getFloat(long index) { return (Double) get(index); } /** * Get string value in the given index. * * @param index array index * @return array element */ @Override public String getString(long index) { return get(index).toString(); } /** * Get string value in the given index. * * @param index array index * @return array element */ @Override public BString getBString(long index) { return (BString) get(index); } /** * Add ref value to the given array index. * * @param index array index * @param value value to be added */ @Override public void add(long index, Object value) { handleImmutableArrayValue(); addRefValue(index, value); } public void addRefValue(long index, Object value) { prepareForAdd(index, value, refValues.length); refValues[(int) index] = value; } public void convertStringAndAddRefValue(long index, BString value) { rangeCheck(index, size); int intIndex = (int) index; Type elemType; if (index >= this.minSize) { elemType = this.tupleType.getRestType(); } else { elemType = this.tupleType.getTupleTypes().get(intIndex); } Object val = ValueConverter.getConvertedStringValue(value, elemType); prepareForAddWithoutTypeCheck(refValues.length, intIndex); refValues[intIndex] = val; } public void addRefValueForcefully(int index, Object value) { prepareForAddForcefully(index, refValues.length); refValues[index] = value; } /** * Add int value to the given array index. * * @param index array index * @param value value to be added */ @Override public void add(long index, long value) { add(index, Long.valueOf(value)); } /** * Add boolean value to the given array index. * * @param index array index * @param value value to be added */ @Override public void add(long index, boolean value) { add(index, Boolean.valueOf(value)); } /** * Add byte value to the given array index. * * @param index array index * @param value value to be added */ @Override public void add(long index, byte value) { add(index, Byte.valueOf(value)); } /** * Add double value to the given array index. * * @param index array index * @param value value to be added */ @Override public void add(long index, double value) { add(index, Double.valueOf(value)); } /** * Add string value to the given array index. 
* * @param index array index * @param value value to be added */ @Override public void add(long index, String value) { add(index, (Object) value); } /** * Add string value to the given array index. * * @param index array index * @param value value to be added */ @Override public void add(long index, BString value) { add(index, (Object) value); } /** * Append value to the existing array. * * @param value value to be appended */ @Override public void append(Object value) { add(size, value); } @Override public Object shift(long index) { handleImmutableArrayValue(); validateTupleSizeAndInherentType(); Object val = get(index); shiftArray((int) index); return val; } /** * Removes and returns first member of an array. * * @return the value that was the first member of the array */ @Override public Object shift() { return shift(0); } @Override public void unshift(Object[] values) { unshift(0, values); } @Override public String stringValue(BLink parent) { StringJoiner sj = new StringJoiner(","); for (int i = 0; i < this.size; i++) { Object value = this.refValues[i]; Type type = TypeChecker.getType(value); CycleUtils.Node parentNode = new CycleUtils.Node(this, parent); switch (type.getTag()) { case TypeTags.STRING_TAG: case TypeTags.XML_TAG: case TypeTags.XML_ELEMENT_TAG: case TypeTags.XML_ATTRIBUTES_TAG: case TypeTags.XML_COMMENT_TAG: case TypeTags.XML_PI_TAG: case TypeTags.XMLNS_TAG: case TypeTags.XML_TEXT_TAG: sj.add(((BValue) value).informalStringValue(parentNode)); break; case TypeTags.NULL_TAG: sj.add("null"); break; default: sj.add(getStringVal(value, new CycleUtils.Node(this, parentNode))); break; } } return "[" + sj + "]"; } @Override public String expressionStringValue(BLink parent) { StringJoiner sj = new StringJoiner(","); for (int i = 0; i < this.size; i++) { sj.add(getExpressionStringVal(this.refValues[i], new CycleUtils.Node(this, parent))); } return "[" + sj + "]"; } @Override public Type getType() { return this.type; } @Override public int size() { return this.size; } @Override public boolean isEmpty() { return this.size == 0; } @Override public BArray slice(long startIndex, long endIndex) { return null; } @Override public Object copy(Map<Object, Object> refs) { if (isFrozen()) { return this; } if (refs.containsKey(this)) { return refs.get(this); } Object[] values = new Object[this.size]; TupleValueImpl refValueArray = new TupleValueImpl(values, this.tupleType); refs.put(this, refValueArray); IntStream.range(0, this.size).forEach(i -> { Object value = this.refValues[i]; if (value instanceof BRefValue) { values[i] = ((BRefValue) value).copy(refs); } else { values[i] = value; } }); return refValueArray; } /** * Get ref values array. * * @return ref value array */ @Override public Object[] getValues() { return refValues; } /** * Get a copy of byte array. * * @return byte array */ @Override public byte[] getBytes() { throw new UnsupportedOperationException(); } /** * Get a copy of string array. * * @return string array */ @Override public String[] getStringArray() { throw new UnsupportedOperationException(); } /** * Get a copy of int array. 
* * @return int array */ @Override public long[] getIntArray() { throw new UnsupportedOperationException(); } @Override public boolean[] getBooleanArray() { throw new UnsupportedOperationException(); } @Override public byte[] getByteArray() { throw new UnsupportedOperationException(); } @Override public double[] getFloatArray() { throw new UnsupportedOperationException(); } @Override public void serialize(OutputStream outputStream) { try { outputStream.write(this.toString().getBytes(Charset.defaultCharset())); } catch (IOException e) { throw ErrorCreator.createError(StringUtils.fromString("error occurred while serializing data"), e); } } /** * {@inheritDoc} */ @Override public void freezeDirect() { if (tupleType.isReadOnly()) { return; } this.type = ReadOnlyUtils.setImmutableTypeAndGetEffectiveType(this.type); this.tupleType = (TupleType) TypeUtils.getImpliedType(type); for (int i = 0; i < this.size; i++) { Object value = this.get(i); if (value instanceof BRefValue) { ((BRefValue) value).freezeDirect(); } } this.typedesc = null; } /** * {@inheritDoc} */ @Override public IteratorValue getIterator() { return new ArrayIterator(this); } /** * Get {@code BType} of the array elements. * * @return element type */ @Override public Type getElementType() { throw new UnsupportedOperationException(); } @Override protected void resizeInternalArray(int newLength) { refValues = Arrays.copyOf(refValues, newLength); } @Override protected void fillValues(int index) { if (index <= size) { return; } Type restType = this.tupleType.getRestType(); if (restType != null) { for (int i = size; i < index; i++) { this.refValues[i] = restType.getZeroValue(); } } } @Override protected void rangeCheckForGet(long index, int size) { rangeCheck(index, size); if (index < 0 || index >= size) { throw ErrorHelper.getRuntimeException(getModulePrefixedReason(ARRAY_LANG_LIB, INDEX_OUT_OF_RANGE_ERROR_IDENTIFIER), ErrorCodes.TUPLE_INDEX_OUT_OF_RANGE, index, size); } } @Override protected void rangeCheck(long index, int size) { if (index > Integer.MAX_VALUE || index < Integer.MIN_VALUE) { throw ErrorHelper.getRuntimeException(getModulePrefixedReason(ARRAY_LANG_LIB, INDEX_OUT_OF_RANGE_ERROR_IDENTIFIER), ErrorCodes.INDEX_NUMBER_TOO_LARGE, index); } if ((this.tupleType.getRestType() == null && index >= this.maxSize) || (int) index < 0) { throw ErrorHelper.getRuntimeException(getModulePrefixedReason(ARRAY_LANG_LIB, INDEX_OUT_OF_RANGE_ERROR_IDENTIFIER), ErrorCodes.TUPLE_INDEX_OUT_OF_RANGE, index, size); } } @Override protected void fillerValueCheck(int index, int size, int expectedLength) { if (this.size >= index) { return; } if (!TypeChecker.hasFillerValue(this.tupleType.getRestType()) && (index > size)) { throw ErrorHelper.getRuntimeException(ErrorReasons.ILLEGAL_LIST_INSERTION_ERROR, ErrorCodes.ILLEGAL_TUPLE_INSERTION, size, expectedLength); } } /** * Same as {@code prepareForAdd}, except fillerValueCheck is not performed as we are guaranteed to add * elements to consecutive positions. 
* * @param index last index after add operation completes * @param currentArraySize current array size */ @Override protected void prepareForConsecutiveMultiAdd(long index, int currentArraySize) { int intIndex = (int) index; rangeCheck(index, size); ensureCapacity(intIndex + 1, currentArraySize); resetSize(intIndex); } @Override protected void ensureCapacity(int requestedCapacity, int currentArraySize) { if (requestedCapacity <= currentArraySize) { return; } int newArraySize = currentArraySize + (currentArraySize >> 1); newArraySize = Math.max(newArraySize, requestedCapacity); newArraySize = Math.min(newArraySize, this.maxSize); resizeInternalArray(newArraySize); } @Override protected void checkFixedLength(long length) { if (this.tupleType.getRestType() == null) { throw ErrorHelper.getRuntimeException(getModulePrefixedReason(ARRAY_LANG_LIB, INHERENT_TYPE_VIOLATION_ERROR_IDENTIFIER), ErrorCodes.ILLEGAL_TUPLE_SIZE, size, length); } else if (this.tupleType.getTupleTypes().size() > length) { throw ErrorHelper.getRuntimeException(getModulePrefixedReason(ARRAY_LANG_LIB, INHERENT_TYPE_VIOLATION_ERROR_IDENTIFIER), ErrorCodes.ILLEGAL_TUPLE_WITH_REST_TYPE_SIZE, this.tupleType.getTupleTypes().size(), length); } } @Override protected void unshift(long index, Object[] vals) { handleImmutableArrayValue(); unshiftArray(index, vals.length, getCurrentArrayLength()); addToRefArray(vals, (int) index); } private void prepareForAdd(long index, Object value, int currentArraySize) { int intIndex = (int) index; rangeCheck(index, size); Type elemType; if (index >= this.minSize) { elemType = this.tupleType.getRestType(); } else { elemType = this.tupleType.getTupleTypes().get((int) index); } if (!TypeChecker.checkIsType(value, elemType)) { throw ErrorCreator.createError(getModulePrefixedReason(ARRAY_LANG_LIB, INHERENT_TYPE_VIOLATION_ERROR_IDENTIFIER), ErrorHelper.getErrorDetails(ErrorCodes.INCOMPATIBLE_TYPE, elemType, TypeChecker.getType(value))); } prepareForAddWithoutTypeCheck(currentArraySize, intIndex); } private void prepareForAddWithoutTypeCheck(int currentArraySize, int intIndex) { fillerValueCheck(intIndex, size, intIndex + 1); ensureCapacity(intIndex + 1, currentArraySize); fillValues(intIndex); resetSize(intIndex); } private void fillRead(long index, int currentArraySize) { Type restType = this.tupleType.getRestType(); if (!TypeChecker.hasFillerValue(restType)) { throw ErrorHelper.getRuntimeException(ErrorReasons.ILLEGAL_LIST_INSERTION_ERROR, ErrorCodes.ILLEGAL_TUPLE_INSERTION, size, index + 1); } int intIndex = (int) index; rangeCheck(index, size); ensureCapacity(intIndex + 1, currentArraySize); for (int i = size; i <= index; i++) { this.refValues[i] = restType.getZeroValue(); } resetSize(intIndex); } private void shiftArray(int index) { int nElemsToBeMoved = this.size - 1 - index; if (nElemsToBeMoved >= 0) { System.arraycopy(this.refValues, index + 1, this.refValues, index, nElemsToBeMoved); } this.size--; } private void addToRefArray(Object[] vals, int startIndex) { int endIndex = startIndex + vals.length; for (int i = startIndex, j = 0; i < endIndex; i++, j++) { add(i, vals[j]); } } private void unshiftArray(long index, int unshiftByN, int arrLength) { int lastIndex = size() + unshiftByN - 1; prepareForConsecutiveMultiAdd(lastIndex, arrLength); if (index > lastIndex) { throw ErrorHelper.getRuntimeException(getModulePrefixedReason(ARRAY_LANG_LIB, INDEX_OUT_OF_RANGE_ERROR_IDENTIFIER), ErrorCodes.INDEX_NUMBER_TOO_LARGE, index); } int i = (int) index; System.arraycopy(this.refValues, i, this.refValues, i + 
unshiftByN, this.size - i); } private int getCurrentArrayLength() { return this.refValues.length; } private void resetSize(int index) { if (index >= size) { size = index + 1; } } }
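The `ensureCapacity` override in the `TupleValueImpl` above grows the backing array by roughly 1.5x, clamped between the requested capacity and the tuple's `maxSize`. A minimal standalone sketch of that growth policy (the class name and the sample numbers are illustrative, not part of the Ballerina runtime API):

public class GrowthPolicyDemo {
    static int newCapacity(int requested, int current, int maxSize) {
        if (requested <= current) {
            return current; // already big enough, nothing to do
        }
        int grown = current + (current >> 1); // ~1.5x growth
        grown = Math.max(grown, requested);   // never below the requested capacity
        return Math.min(grown, maxSize);      // never beyond the tuple's upper bound
    }

    public static void main(String[] args) {
        System.out.println(newCapacity(17, 16, Integer.MAX_VALUE)); // 24 (1.5x wins)
        System.out.println(newCapacity(40, 24, Integer.MAX_VALUE)); // 40 (request wins)
        System.out.println(newCapacity(40, 24, 30));                // 30 (capped at maxSize)
    }
}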
These two return two different results. The original one is 2^(tryCount-1); now it is (tryCount-1)^2. What if tryCount = 1? Is the delay expected to be negative?
long calculateDelayInMs(int tryCount) { long delay; switch (this.retryPolicyType) { case EXPONENTIAL: delay = ((tryCount - 1) * (tryCount - 1) - 1L) * this.retryDelayInMs; break; case FIXED: delay = this.retryDelayInMs; break; default: throw logger.logExceptionAsError(new IllegalArgumentException("Invalid retry policy type.")); } return Math.min(delay, this.maxRetryDelayInMs); }
delay = ((tryCount - 1) * (tryCount - 1) - 1L) * this.retryDelayInMs;
long calculateDelayInMs(int tryCount) { long delay; switch (this.retryPolicyType) { case EXPONENTIAL: delay = ((1L << (tryCount - 1)) - 1L) * this.retryDelayInMs; /* 2^(tryCount-1) - 1, the exponential backoff the review comment describes */ break; case FIXED: delay = this.retryDelayInMs; break; default: throw logger.logExceptionAsError(new IllegalArgumentException("Invalid retry policy type.")); } return Math.min(delay, this.maxRetryDelayInMs); }
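To make the review comment concrete: the `(tryCount - 1)^2 - 1` form in the original body goes negative at `tryCount = 1`, while the `2^(tryCount - 1) - 1` exponential form yields zero. A throwaway comparison sketch; the 4000 ms base delay is taken from the EXPONENTIAL default (`TimeUnit.SECONDS.toMillis(4)`) in the surrounding class:

public class BackoffComparison {
    public static void main(String[] args) {
        long retryDelayMs = 4000L;
        for (int tryCount = 1; tryCount <= 4; tryCount++) {
            long squared = ((tryCount - 1L) * (tryCount - 1L) - 1L) * retryDelayMs; // original body
            long exponential = ((1L << (tryCount - 1)) - 1L) * retryDelayMs;        // 2^(tryCount-1) - 1
            System.out.printf("try %d: squared=%d ms, exponential=%d ms%n", tryCount, squared, exponential);
        }
        // try 1: squared=-4000 ms, exponential=0 ms <- the negative delay the reviewer asks about
    }
}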
class RequestRetryOptions { private final ClientLogger logger = new ClientLogger(RequestRetryOptions.class); private final int maxTries; private final int tryTimeout; private final long retryDelayInMs; private final long maxRetryDelayInMs; private final RetryPolicyType retryPolicyType; private final String secondaryHost; /** * Configures how the {@link HttpPipeline} should retry requests. */ public RequestRetryOptions() { this(RetryPolicyType.EXPONENTIAL, null, null, null, null, null); } /** * Configures how the {@link HttpPipeline} should retry requests. * * @param retryPolicyType Optional. A {@link RetryPolicyType} specifying the type of retry pattern to use, default * value is {@link RetryPolicyType * @param maxTries Optional. Maximum number of attempts an operation will be retried, default is {@code 4}. * @param tryTimeout Optional. Specified the maximum time allowed before a request is cancelled and assumed failed, * default is {@link Integer * * <p>This value should be based on the bandwidth available to the host machine and proximity to the Storage * service, a good starting point may be 60 seconds per MB of anticipated payload size.</p> * @param retryDelayInMs Optional. Specifies the amount of delay to use before retrying an operation, default value * is {@code 4ms} when {@code retryPolicyType} is {@link RetryPolicyType * when {@code retryPolicyType} is {@link RetryPolicyType * @param maxRetryDelayInMs Optional. Specifies the maximum delay allowed before retrying an operation, default * value is {@code 120ms}. * @param secondaryHost Optional. Specified a secondary Storage account to retry requests against, default is none. * * <p>Before setting this understand the issues around reading stale and potentially-inconsistent data, view these * <a href=https: * for more information.</p> * @throws IllegalArgumentException If {@code retryDelayInMs} and {@code maxRetryDelayInMs} are not both null or * non-null or {@code retryPolicyType} isn't {@link RetryPolicyType */ public RequestRetryOptions(RetryPolicyType retryPolicyType, Integer maxTries, Integer tryTimeout, Long retryDelayInMs, Long maxRetryDelayInMs, String secondaryHost) { this.retryPolicyType = retryPolicyType == null ? 
RetryPolicyType.EXPONENTIAL : retryPolicyType; if (maxTries != null) { Utility.assertInBounds("maxRetries", maxTries, 1, Integer.MAX_VALUE); this.maxTries = maxTries; } else { this.maxTries = 4; } if (tryTimeout != null) { Utility.assertInBounds("tryTimeout", tryTimeout, 1, Integer.MAX_VALUE); this.tryTimeout = tryTimeout; } else { this.tryTimeout = Integer.MAX_VALUE; } if ((retryDelayInMs == null && maxRetryDelayInMs != null) || (retryDelayInMs != null && maxRetryDelayInMs == null)) { throw logger.logExceptionAsError( new IllegalArgumentException("Both retryDelay and maxRetryDelay must be null or neither can be null")); } if (retryDelayInMs != null) { Utility.assertInBounds("maxRetryDelayInMs", maxRetryDelayInMs, 1, Long.MAX_VALUE); Utility.assertInBounds("retryDelayInMs", retryDelayInMs, 1, maxRetryDelayInMs); this.maxRetryDelayInMs = maxRetryDelayInMs; this.retryDelayInMs = retryDelayInMs; } else { switch (this.retryPolicyType) { case EXPONENTIAL: this.retryDelayInMs = TimeUnit.SECONDS.toMillis(4); break; case FIXED: this.retryDelayInMs = TimeUnit.SECONDS.toMillis(30); break; default: throw logger.logExceptionAsError(new IllegalArgumentException("Invalid 'RetryPolicyType'.")); } this.maxRetryDelayInMs = TimeUnit.SECONDS.toMillis(120); } this.secondaryHost = secondaryHost; } /** * @return the maximum number of retries that will be attempted. */ public int maxTries() { return this.maxTries; } /** * @return the maximum time, in seconds, allowed for a request until it is considered timed out. */ public int tryTimeout() { return this.tryTimeout; } /** * @return the URI of the secondary host where retries are attempted. If this is null then there is no secondary * host and all retries are attempted against the original host. */ public String secondaryHost() { return this.secondaryHost; } /** * @return the delay in milliseconds between each retry attempt. */ public long retryDelayInMs() { return retryDelayInMs; } /** * @return the maximum delay in milliseconds allowed between each retry. */ public long maxRetryDelayInMs() { return maxRetryDelayInMs; } /** * Calculates how long to delay before sending the next request. * * @param tryCount An {@code int} indicating which try we are on. * @return A {@code long} value of how many milliseconds to delay. */ }
class RequestRetryOptions { private final ClientLogger logger = new ClientLogger(RequestRetryOptions.class); private final int maxTries; private final int tryTimeout; private final long retryDelayInMs; private final long maxRetryDelayInMs; private final RetryPolicyType retryPolicyType; private final String secondaryHost; /** * Configures how the {@link HttpPipeline} should retry requests. */ public RequestRetryOptions() { this(RetryPolicyType.EXPONENTIAL, null, null, null, null, null); } /** * Configures how the {@link HttpPipeline} should retry requests. * * @param retryPolicyType Optional. A {@link RetryPolicyType} specifying the type of retry pattern to use, default * value is {@link RetryPolicyType * @param maxTries Optional. Maximum number of attempts an operation will be retried, default is {@code 4}. * @param tryTimeout Optional. Specified the maximum time allowed before a request is cancelled and assumed failed, * default is {@link Integer * * <p>This value should be based on the bandwidth available to the host machine and proximity to the Storage * service, a good starting point may be 60 seconds per MB of anticipated payload size.</p> * @param retryDelayInMs Optional. Specifies the amount of delay to use before retrying an operation, default value * is {@code 4ms} when {@code retryPolicyType} is {@link RetryPolicyType * when {@code retryPolicyType} is {@link RetryPolicyType * @param maxRetryDelayInMs Optional. Specifies the maximum delay allowed before retrying an operation, default * value is {@code 120ms}. * @param secondaryHost Optional. Specified a secondary Storage account to retry requests against, default is none. * * <p>Before setting this understand the issues around reading stale and potentially-inconsistent data, view these * <a href=https: * for more information.</p> * @throws IllegalArgumentException If {@code retryDelayInMs} and {@code maxRetryDelayInMs} are not both null or * non-null or {@code retryPolicyType} isn't {@link RetryPolicyType */ public RequestRetryOptions(RetryPolicyType retryPolicyType, Integer maxTries, Integer tryTimeout, Long retryDelayInMs, Long maxRetryDelayInMs, String secondaryHost) { this.retryPolicyType = retryPolicyType == null ? 
RetryPolicyType.EXPONENTIAL : retryPolicyType; if (maxTries != null) { Utility.assertInBounds("maxRetries", maxTries, 1, Integer.MAX_VALUE); this.maxTries = maxTries; } else { this.maxTries = 4; } if (tryTimeout != null) { Utility.assertInBounds("tryTimeout", tryTimeout, 1, Integer.MAX_VALUE); this.tryTimeout = tryTimeout; } else { this.tryTimeout = Integer.MAX_VALUE; } if ((retryDelayInMs == null && maxRetryDelayInMs != null) || (retryDelayInMs != null && maxRetryDelayInMs == null)) { throw logger.logExceptionAsError( new IllegalArgumentException("Both retryDelay and maxRetryDelay must be null or neither can be null")); } if (retryDelayInMs != null) { Utility.assertInBounds("maxRetryDelayInMs", maxRetryDelayInMs, 1, Long.MAX_VALUE); Utility.assertInBounds("retryDelayInMs", retryDelayInMs, 1, maxRetryDelayInMs); this.maxRetryDelayInMs = maxRetryDelayInMs; this.retryDelayInMs = retryDelayInMs; } else { switch (this.retryPolicyType) { case EXPONENTIAL: this.retryDelayInMs = TimeUnit.SECONDS.toMillis(4); break; case FIXED: this.retryDelayInMs = TimeUnit.SECONDS.toMillis(30); break; default: throw logger.logExceptionAsError(new IllegalArgumentException("Invalid 'RetryPolicyType'.")); } this.maxRetryDelayInMs = TimeUnit.SECONDS.toMillis(120); } this.secondaryHost = secondaryHost; } /** * @return the maximum number of retries that will be attempted. */ public int maxTries() { return this.maxTries; } /** * @return the maximum time, in seconds, allowed for a request until it is considered timed out. */ public int tryTimeout() { return this.tryTimeout; } /** * @return the URI of the secondary host where retries are attempted. If this is null then there is no secondary * host and all retries are attempted against the original host. */ public String secondaryHost() { return this.secondaryHost; } /** * @return the delay in milliseconds between each retry attempt. */ public long retryDelayInMs() { return retryDelayInMs; } /** * @return the maximum delay in milliseconds allowed between each retry. */ public long maxRetryDelayInMs() { return maxRetryDelayInMs; } /** * Calculates how long to delay before sending the next request. * * @param tryCount An {@code int} indicating which try we are on. * @return A {@code long} value of how many milliseconds to delay. */ }
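For reference, a hedged usage sketch of the constructor shown in the surrounding context; it assumes `RequestRetryOptions` and `RetryPolicyType` are importable from the same package:

public class RetryOptionsUsage {
    public static void main(String[] args) {
        // Fixed 500 ms delay between tries, capped at 5 s, at most 6 tries, 30 s per-try
        // timeout, no secondary host. Passing only one of the two delay values would throw
        // IllegalArgumentException, per the validation in the constructor above.
        RequestRetryOptions options =
                new RequestRetryOptions(RetryPolicyType.FIXED, 6, 30, 500L, 5000L, null);

        // All-null arguments fall back to the defaults: EXPONENTIAL policy, 4 tries,
        // 4 s base delay, 120 s maximum delay.
        RequestRetryOptions defaults = new RequestRetryOptions();

        System.out.println(options.maxTries() + " tries / base " + defaults.retryDelayInMs() + " ms");
    }
}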
When we add multiLayerProjection, we do not modify its original projects. That is why only the translator needs to be modified.
public PlanFragment visitPhysicalProject(PhysicalProject<? extends Plan> project, PlanTranslatorContext context) { if (project.child(0) instanceof AbstractPhysicalJoin) { ((AbstractPhysicalJoin<?, ?>) project.child(0)).setShouldTranslateOutput(false); } if (project.child(0) instanceof PhysicalFilter) { if (project.child(0).child(0) instanceof AbstractPhysicalJoin) { ((AbstractPhysicalJoin<?, ?>) project.child(0).child(0)).setShouldTranslateOutput(false); } } PlanFragment inputFragment = project.child(0).accept(this, context); if (inputFragment.getPlanRoot() instanceof OlapScanNode) { registerRewrittenSlot(project, (OlapScanNode) inputFragment.getPlanRoot()); } PlanNode inputPlanNode = inputFragment.getPlanRoot(); List<Expr> projectionExprs = null; List<Expr> allProjectionExprs = Lists.newArrayList(); List<Slot> slots = null; if (project.hasMultiLayerProjection() && !(inputFragment instanceof MultiCastPlanFragment)) { int layerCount = project.getMultiLayerProjects().size(); for (int i = 0; i < layerCount; i++) { List<NamedExpression> layer = project.getMultiLayerProjects().get(i); projectionExprs = layer.stream() .map(e -> ExpressionTranslator.translate(e, context)) .collect(Collectors.toList()); slots = layer.stream() .map(NamedExpression::toSlot) .collect(Collectors.toList()); if (i < layerCount - 1) { inputPlanNode.addIntermediateProjectList(projectionExprs); TupleDescriptor projectionTuple = generateTupleDesc(slots, null, context); inputPlanNode.addIntermediateOutputTupleDescList(projectionTuple); } allProjectionExprs.addAll(projectionExprs); } } else { projectionExprs = project.getProjects() .stream() .map(e -> ExpressionTranslator.translate(e, context)) .collect(Collectors.toList()); slots = project.getProjects() .stream() .map(NamedExpression::toSlot) .collect(Collectors.toList()); allProjectionExprs.addAll(projectionExprs); } if (inputFragment instanceof MultiCastPlanFragment) { MultiCastDataSink multiCastDataSink = (MultiCastDataSink) inputFragment.getSink(); DataStreamSink dataStreamSink = multiCastDataSink.getDataStreamSinks().get( multiCastDataSink.getDataStreamSinks().size() - 1); TupleDescriptor projectionTuple = generateTupleDesc(slots, null, context); dataStreamSink.setProjections(projectionExprs); dataStreamSink.setOutputTupleDesc(projectionTuple); return inputFragment; } List<Expr> conjuncts = inputPlanNode.getConjuncts(); Set<SlotId> requiredSlotIdSet = Sets.newHashSet(); for (Expr expr : allProjectionExprs) { Expr.extractSlots(expr, requiredSlotIdSet); } Set<SlotId> requiredByProjectSlotIdSet = Sets.newHashSet(requiredSlotIdSet); for (Expr expr : conjuncts) { Expr.extractSlots(expr, requiredSlotIdSet); } if (inputPlanNode instanceof JoinNodeBase) { TupleDescriptor tupleDescriptor = generateTupleDesc(slots, null, context); JoinNodeBase joinNode = (JoinNodeBase) inputPlanNode; joinNode.setOutputTupleDesc(tupleDescriptor); joinNode.setProjectList(projectionExprs); if (joinNode instanceof HashJoinNode) { ((HashJoinNode) joinNode).getHashOutputSlotIds().clear(); Set<ExprId> requiredExprIds = Sets.newHashSet(); Set<SlotId> requiredOtherConjunctsSlotIdSet = Sets.newHashSet(); List<Expr> otherConjuncts = ((HashJoinNode) joinNode).getOtherJoinConjuncts(); for (Expr expr : otherConjuncts) { Expr.extractSlots(expr, requiredOtherConjunctsSlotIdSet); } if (!((HashJoinNode) joinNode).getEqJoinConjuncts().isEmpty() && !((HashJoinNode) joinNode).getMarkJoinConjuncts().isEmpty()) { List<Expr> markConjuncts = ((HashJoinNode) joinNode).getMarkJoinConjuncts(); for (Expr expr : 
markConjuncts) { Expr.extractSlots(expr, requiredOtherConjunctsSlotIdSet); } } requiredOtherConjunctsSlotIdSet.forEach(e -> requiredExprIds.add(context.findExprId(e))); requiredSlotIdSet.forEach(e -> requiredExprIds.add(context.findExprId(e))); for (ExprId exprId : requiredExprIds) { SlotId slotId = ((HashJoinNode) joinNode).getHashOutputExprSlotIdMap().get(exprId); if (slotId != null) { ((HashJoinNode) joinNode).addSlotIdToHashOutputSlotIds(slotId); } } } return inputFragment; } if (inputPlanNode instanceof TableFunctionNode) { TableFunctionNode tableFunctionNode = (TableFunctionNode) inputPlanNode; tableFunctionNode.setOutputSlotIds(Lists.newArrayList(requiredSlotIdSet)); } if (inputPlanNode instanceof ScanNode) { TupleDescriptor projectionTuple = null; List<SlotId> slotIdsByOrder = Lists.newArrayList(); if (requiredByProjectSlotIdSet.size() != requiredSlotIdSet.size() || new HashSet<>(projectionExprs).size() != projectionExprs.size() || projectionExprs.stream().anyMatch(expr -> !(expr instanceof SlotRef))) { projectionTuple = generateTupleDesc(slots, ((ScanNode) inputPlanNode).getTupleDesc().getTable(), context); inputPlanNode.setProjectList(projectionExprs); inputPlanNode.setOutputTupleDesc(projectionTuple); } else { for (int i = 0; i < slots.size(); ++i) { context.addExprIdSlotRefPair(slots.get(i).getExprId(), (SlotRef) projectionExprs.get(i)); slotIdsByOrder.add(((SlotRef) projectionExprs.get(i)).getSlotId()); } } if (inputPlanNode instanceof OlapScanNode) { ArrayList<SlotDescriptor> olapScanSlots = context.getTupleDesc(inputPlanNode.getTupleIds().get(0)).getSlots(); SlotDescriptor lastSlot = olapScanSlots.get(olapScanSlots.size() - 1); if (lastSlot.getColumn() != null && lastSlot.getColumn().getName().equals(Column.ROWID_COL)) { if (projectionTuple != null) { injectRowIdColumnSlot(projectionTuple); SlotRef slotRef = new SlotRef(lastSlot); inputPlanNode.getProjectList().add(slotRef); requiredByProjectSlotIdSet.add(lastSlot.getId()); } else { slotIdsByOrder.add(lastSlot.getId()); } requiredSlotIdSet.add(lastSlot.getId()); } } updateScanSlotsMaterialization((ScanNode) inputPlanNode, requiredSlotIdSet, requiredByProjectSlotIdSet, slotIdsByOrder, context); } else { TupleDescriptor tupleDescriptor = generateTupleDesc(slots, null, context); inputPlanNode.setProjectList(projectionExprs); inputPlanNode.setOutputTupleDesc(tupleDescriptor); } return inputFragment; }
if (project.hasMultiLayerProjection() && !(inputFragment instanceof MultiCastPlanFragment)) {
public PlanFragment visitPhysicalProject(PhysicalProject<? extends Plan> project, PlanTranslatorContext context) { if (project.child(0) instanceof AbstractPhysicalJoin) { ((AbstractPhysicalJoin<?, ?>) project.child(0)).setShouldTranslateOutput(false); } if (project.child(0) instanceof PhysicalFilter) { if (project.child(0).child(0) instanceof AbstractPhysicalJoin) { ((AbstractPhysicalJoin<?, ?>) project.child(0).child(0)).setShouldTranslateOutput(false); } } PlanFragment inputFragment = project.child(0).accept(this, context); if (inputFragment.getPlanRoot() instanceof OlapScanNode) { registerRewrittenSlot(project, (OlapScanNode) inputFragment.getPlanRoot()); } PlanNode inputPlanNode = inputFragment.getPlanRoot(); List<Expr> projectionExprs = null; List<Expr> allProjectionExprs = Lists.newArrayList(); List<Slot> slots = null; if (project.hasMultiLayerProjection() && !(inputFragment instanceof MultiCastPlanFragment)) { int layerCount = project.getMultiLayerProjects().size(); for (int i = 0; i < layerCount; i++) { List<NamedExpression> layer = project.getMultiLayerProjects().get(i); projectionExprs = layer.stream() .map(e -> ExpressionTranslator.translate(e, context)) .collect(Collectors.toList()); slots = layer.stream() .map(NamedExpression::toSlot) .collect(Collectors.toList()); if (i < layerCount - 1) { inputPlanNode.addIntermediateProjectList(projectionExprs); TupleDescriptor projectionTuple = generateTupleDesc(slots, null, context); inputPlanNode.addIntermediateOutputTupleDescList(projectionTuple); } allProjectionExprs.addAll(projectionExprs); } } else { projectionExprs = project.getProjects() .stream() .map(e -> ExpressionTranslator.translate(e, context)) .collect(Collectors.toList()); slots = project.getProjects() .stream() .map(NamedExpression::toSlot) .collect(Collectors.toList()); allProjectionExprs.addAll(projectionExprs); } if (inputFragment instanceof MultiCastPlanFragment) { MultiCastDataSink multiCastDataSink = (MultiCastDataSink) inputFragment.getSink(); DataStreamSink dataStreamSink = multiCastDataSink.getDataStreamSinks().get( multiCastDataSink.getDataStreamSinks().size() - 1); TupleDescriptor projectionTuple = generateTupleDesc(slots, null, context); dataStreamSink.setProjections(projectionExprs); dataStreamSink.setOutputTupleDesc(projectionTuple); return inputFragment; } List<Expr> conjuncts = inputPlanNode.getConjuncts(); Set<SlotId> requiredSlotIdSet = Sets.newHashSet(); for (Expr expr : allProjectionExprs) { Expr.extractSlots(expr, requiredSlotIdSet); } Set<SlotId> requiredByProjectSlotIdSet = Sets.newHashSet(requiredSlotIdSet); for (Expr expr : conjuncts) { Expr.extractSlots(expr, requiredSlotIdSet); } if (inputPlanNode instanceof JoinNodeBase) { TupleDescriptor tupleDescriptor = generateTupleDesc(slots, null, context); JoinNodeBase joinNode = (JoinNodeBase) inputPlanNode; joinNode.setOutputTupleDesc(tupleDescriptor); joinNode.setProjectList(projectionExprs); if (joinNode instanceof HashJoinNode) { ((HashJoinNode) joinNode).getHashOutputSlotIds().clear(); Set<ExprId> requiredExprIds = Sets.newHashSet(); Set<SlotId> requiredOtherConjunctsSlotIdSet = Sets.newHashSet(); List<Expr> otherConjuncts = ((HashJoinNode) joinNode).getOtherJoinConjuncts(); for (Expr expr : otherConjuncts) { Expr.extractSlots(expr, requiredOtherConjunctsSlotIdSet); } if (!((HashJoinNode) joinNode).getEqJoinConjuncts().isEmpty() && !((HashJoinNode) joinNode).getMarkJoinConjuncts().isEmpty()) { List<Expr> markConjuncts = ((HashJoinNode) joinNode).getMarkJoinConjuncts(); for (Expr expr : 
markConjuncts) { Expr.extractSlots(expr, requiredOtherConjunctsSlotIdSet); } } requiredOtherConjunctsSlotIdSet.forEach(e -> requiredExprIds.add(context.findExprId(e))); requiredSlotIdSet.forEach(e -> requiredExprIds.add(context.findExprId(e))); for (ExprId exprId : requiredExprIds) { SlotId slotId = ((HashJoinNode) joinNode).getHashOutputExprSlotIdMap().get(exprId); if (slotId != null) { ((HashJoinNode) joinNode).addSlotIdToHashOutputSlotIds(slotId); } } } return inputFragment; } if (inputPlanNode instanceof TableFunctionNode) { TableFunctionNode tableFunctionNode = (TableFunctionNode) inputPlanNode; tableFunctionNode.setOutputSlotIds(Lists.newArrayList(requiredSlotIdSet)); } if (inputPlanNode instanceof ScanNode) { TupleDescriptor projectionTuple = null; List<SlotId> slotIdsByOrder = Lists.newArrayList(); if (requiredByProjectSlotIdSet.size() != requiredSlotIdSet.size() || new HashSet<>(projectionExprs).size() != projectionExprs.size() || projectionExprs.stream().anyMatch(expr -> !(expr instanceof SlotRef))) { projectionTuple = generateTupleDesc(slots, ((ScanNode) inputPlanNode).getTupleDesc().getTable(), context); inputPlanNode.setProjectList(projectionExprs); inputPlanNode.setOutputTupleDesc(projectionTuple); } else { for (int i = 0; i < slots.size(); ++i) { context.addExprIdSlotRefPair(slots.get(i).getExprId(), (SlotRef) projectionExprs.get(i)); slotIdsByOrder.add(((SlotRef) projectionExprs.get(i)).getSlotId()); } } if (inputPlanNode instanceof OlapScanNode) { ArrayList<SlotDescriptor> olapScanSlots = context.getTupleDesc(inputPlanNode.getTupleIds().get(0)).getSlots(); SlotDescriptor lastSlot = olapScanSlots.get(olapScanSlots.size() - 1); if (lastSlot.getColumn() != null && lastSlot.getColumn().getName().equals(Column.ROWID_COL)) { if (projectionTuple != null) { injectRowIdColumnSlot(projectionTuple); SlotRef slotRef = new SlotRef(lastSlot); inputPlanNode.getProjectList().add(slotRef); requiredByProjectSlotIdSet.add(lastSlot.getId()); } else { slotIdsByOrder.add(lastSlot.getId()); } requiredSlotIdSet.add(lastSlot.getId()); } } updateScanSlotsMaterialization((ScanNode) inputPlanNode, requiredSlotIdSet, requiredByProjectSlotIdSet, slotIdsByOrder, context); } else { TupleDescriptor tupleDescriptor = generateTupleDesc(slots, null, context); inputPlanNode.setProjectList(projectionExprs); inputPlanNode.setOutputTupleDesc(tupleDescriptor); } return inputFragment; }
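Since the multi-layer projects are carried alongside the original project list rather than replacing it, the translator loop above is the only place that has to understand layering: every layer but the last becomes an intermediate project list (with its own tuple descriptor) on the same plan node, and the final layer becomes the node's output projection. A schematic sketch of that wiring (`PlanNodeSketch` and the expression strings are stand-ins, not actual Doris planner classes):

import java.util.ArrayList;
import java.util.List;

public class MultiLayerProjectionSketch {
    static class PlanNodeSketch {
        final List<List<String>> intermediateProjects = new ArrayList<>();
        List<String> finalProject;
    }

    static void attachLayers(PlanNodeSketch node, List<List<String>> layers) {
        for (int i = 0; i < layers.size(); i++) {
            if (i < layers.size() - 1) {
                node.intermediateProjects.add(layers.get(i)); // evaluated first, in order
            } else {
                node.finalProject = layers.get(i);            // what the node emits
            }
        }
    }

    public static void main(String[] args) {
        PlanNodeSketch node = new PlanNodeSketch();
        attachLayers(node, List.of(
                List.of("a + 1 AS t0", "b AS t1"),  // layer 0: intermediate projection
                List.of("t0 * t1 AS out")));        // layer 1: final output projection
        System.out.println(node.intermediateProjects + " -> " + node.finalProject);
    }
}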
class PhysicalPlanTranslator extends DefaultPlanVisitor<PlanFragment, PlanTranslatorContext> { private static final Logger LOG = LogManager.getLogger(PhysicalPlanTranslator.class); private final StatsErrorEstimator statsErrorEstimator; private final PlanTranslatorContext context; public PhysicalPlanTranslator() { this(null, null); } public PhysicalPlanTranslator(PlanTranslatorContext context) { this(context, null); } public PhysicalPlanTranslator(PlanTranslatorContext context, StatsErrorEstimator statsErrorEstimator) { this.context = context; this.statsErrorEstimator = statsErrorEstimator; } /** * Translate Nereids Physical Plan tree to Stale Planner PlanFragment tree. * * @param physicalPlan Nereids Physical Plan tree * @return Stale Planner PlanFragment tree */ public PlanFragment translatePlan(PhysicalPlan physicalPlan) { PlanFragment rootFragment = physicalPlan.accept(this, context); if (CollectionUtils.isEmpty(rootFragment.getOutputExprs())) { List<Expr> outputExprs = Lists.newArrayList(); physicalPlan.getOutput().stream().map(Slot::getExprId) .forEach(exprId -> outputExprs.add(context.findSlotRef(exprId))); rootFragment.setOutputExprs(outputExprs); } Collections.reverse(context.getPlanFragments()); context.getDescTable().computeMemLayout(); if (context.getSessionVariable() != null && context.getSessionVariable().forbidUnknownColStats) { Set<ScanNode> scans = context.getScanNodeWithUnknownColumnStats(); if (!scans.isEmpty()) { StringBuilder builder = new StringBuilder(); scans.forEach(builder::append); throw new AnalysisException("tables with unknown column stats: " + builder); } } return rootFragment; } /* ******************************************************************************************** * distribute node * ******************************************************************************************** */ @Override public PlanFragment visitPhysicalDistribute(PhysicalDistribute<? 
extends Plan> distribute, PlanTranslatorContext context) { Plan child = distribute.child(); PlanFragment inputFragment = child.accept(this, context); List<List<Expr>> distributeExprLists = getDistributeExprs(child); if (inputFragment.getPlanRoot() instanceof AggregationNode && child instanceof PhysicalHashAggregate && context.getFirstAggregateInFragment(inputFragment) == child) { PhysicalHashAggregate<?> hashAggregate = (PhysicalHashAggregate<?>) child; if (hashAggregate.getAggPhase() == AggPhase.LOCAL && hashAggregate.getAggMode() == AggMode.INPUT_TO_BUFFER) { AggregationNode aggregationNode = (AggregationNode) inputFragment.getPlanRoot(); aggregationNode.setUseStreamingPreagg(hashAggregate.isMaybeUsingStream()); } } ExchangeNode exchangeNode = new ExchangeNode(context.nextPlanNodeId(), inputFragment.getPlanRoot()); updateLegacyPlanIdToPhysicalPlan(exchangeNode, distribute); List<ExprId> validOutputIds = distribute.getOutputExprIds(); if (child instanceof PhysicalHashAggregate) { List<ExprId> keys = ((PhysicalHashAggregate<?>) child).getGroupByExpressions().stream() .filter(SlotReference.class::isInstance) .map(SlotReference.class::cast) .map(SlotReference::getExprId) .collect(Collectors.toList()); keys.addAll(validOutputIds); validOutputIds = keys; } else if (child instanceof PhysicalLimit && ((PhysicalLimit<?>) child).getPhase().isGlobal()) { exchangeNode.setOffset(((PhysicalLimit<?>) child).getOffset()); } if (inputFragment instanceof MultiCastPlanFragment) { MultiCastDataSink multiCastDataSink = (MultiCastDataSink) inputFragment.getSink(); DataStreamSink dataStreamSink = multiCastDataSink.getDataStreamSinks().get( multiCastDataSink.getDataStreamSinks().size() - 1); if (!(child instanceof PhysicalProject)) { List<Expr> projectionExprs = new ArrayList<>(); PhysicalCTEConsumer consumer = getCTEConsumerChild(distribute); Preconditions.checkState(consumer != null, "consumer not found"); for (Slot slot : distribute.getOutput()) { projectionExprs.add(ExpressionTranslator.translate(consumer.getProducerSlot(slot), context)); } TupleDescriptor projectionTuple = generateTupleDesc(distribute.getOutput(), null, context); dataStreamSink.setProjections(projectionExprs); dataStreamSink.setOutputTupleDesc(projectionTuple); } } DataPartition dataPartition = toDataPartition(distribute.getDistributionSpec(), validOutputIds, context); exchangeNode.setPartitionType(dataPartition.getType()); exchangeNode.setChildrenDistributeExprLists(distributeExprLists); PlanFragment parentFragment = new PlanFragment(context.nextFragmentId(), exchangeNode, dataPartition); if (distribute.getDistributionSpec() instanceof DistributionSpecGather) { exchangeNode.setNumInstances(1); } else { exchangeNode.setNumInstances(inputFragment.getPlanRoot().getNumInstances()); } if (inputFragment instanceof MultiCastPlanFragment) { MultiCastDataSink multiCastDataSink = (MultiCastDataSink) inputFragment.getSink(); DataStreamSink dataStreamSink = multiCastDataSink.getDataStreamSinks().get( multiCastDataSink.getDataStreamSinks().size() - 1); TupleDescriptor tupleDescriptor = generateTupleDesc(distribute.getOutput(), null, context); exchangeNode.updateTupleIds(tupleDescriptor); dataStreamSink.setExchNodeId(exchangeNode.getId()); dataStreamSink.setOutputPartition(dataPartition); parentFragment.addChild(inputFragment); ((MultiCastPlanFragment) inputFragment).addToDest(exchangeNode); CTEScanNode cteScanNode = context.getCteScanNodeMap().get(inputFragment.getFragmentId()); Preconditions.checkState(cteScanNode != null, "cte scan node is null"); 
cteScanNode.setFragment(inputFragment); cteScanNode.setPlanNodeId(exchangeNode.getId()); context.getRuntimeTranslator().ifPresent(runtimeFilterTranslator -> runtimeFilterTranslator.getContext().getPlanNodeIdToCTEDataSinkMap() .put(cteScanNode.getId(), dataStreamSink)); } else { inputFragment.setDestination(exchangeNode); inputFragment.setOutputPartition(dataPartition); DataStreamSink streamSink = new DataStreamSink(exchangeNode.getId()); streamSink.setOutputPartition(dataPartition); inputFragment.setSink(streamSink); } context.addPlanFragment(parentFragment); return parentFragment; } /* ******************************************************************************************** * sink Node, in lexicographical order * ******************************************************************************************** */ @Override public PlanFragment visitPhysicalResultSink(PhysicalResultSink<? extends Plan> physicalResultSink, PlanTranslatorContext context) { PlanFragment planFragment = physicalResultSink.child().accept(this, context); TResultSinkType resultSinkType = context.getConnectContext() != null ? context.getConnectContext().getResultSinkType() : null; planFragment.setSink(new ResultSink(planFragment.getPlanRoot().getId(), resultSinkType)); return planFragment; } @Override public PlanFragment visitPhysicalDeferMaterializeResultSink( PhysicalDeferMaterializeResultSink<? extends Plan> sink, PlanTranslatorContext context) { PlanFragment planFragment = visitPhysicalResultSink(sink.getPhysicalResultSink(), context); TFetchOption fetchOption = sink.getOlapTable().generateTwoPhaseReadOption(sink.getSelectedIndexId()); ((ResultSink) planFragment.getSink()).setFetchOption(fetchOption); return planFragment; } @Override public PlanFragment visitPhysicalOlapTableSink(PhysicalOlapTableSink<? extends Plan> olapTableSink, PlanTranslatorContext context) { PlanFragment rootFragment = olapTableSink.child().accept(this, context); rootFragment.setOutputPartition(DataPartition.UNPARTITIONED); HashSet<String> partialUpdateCols = new HashSet<>(); boolean isPartialUpdate = olapTableSink.isPartialUpdate(); if (isPartialUpdate) { for (Column col : olapTableSink.getCols()) { partialUpdateCols.add(col.getName()); } } TupleDescriptor olapTuple = context.generateTupleDesc(); List<Column> targetTableColumns = olapTableSink.getTargetTable().getFullSchema(); for (Column column : targetTableColumns) { if (isPartialUpdate && !partialUpdateCols.contains(column.getName())) { continue; } SlotDescriptor slotDesc = context.addSlotDesc(olapTuple); slotDesc.setIsMaterialized(true); slotDesc.setType(column.getType()); slotDesc.setColumn(column); slotDesc.setIsNullable(column.isAllowNull()); slotDesc.setAutoInc(column.isAutoInc()); } OlapTableSink sink; if (context.getConnectContext().isGroupCommitStreamLoadSql()) { sink = new GroupCommitBlockSink(olapTableSink.getTargetTable(), olapTuple, olapTableSink.getTargetTable().getPartitionIds(), olapTableSink.isSingleReplicaLoad(), context.getSessionVariable().getGroupCommit(), 0); } else { sink = new OlapTableSink( olapTableSink.getTargetTable(), olapTuple, olapTableSink.getPartitionIds().isEmpty() ? null : olapTableSink.getPartitionIds(), olapTableSink.isSingleReplicaLoad() ); } sink.setPartialUpdateInputColumns(isPartialUpdate, partialUpdateCols); rootFragment.setSink(sink); return rootFragment; } @Override public PlanFragment visitPhysicalHiveTableSink(PhysicalHiveTableSink<? 
extends Plan> hiveTableSink, PlanTranslatorContext context) { PlanFragment rootFragment = hiveTableSink.child().accept(this, context); rootFragment.setOutputPartition(DataPartition.UNPARTITIONED); TupleDescriptor hiveTuple = context.generateTupleDesc(); List<Column> targetTableColumns = hiveTableSink.getTargetTable().getFullSchema(); for (Column column : targetTableColumns) { SlotDescriptor slotDesc = context.addSlotDesc(hiveTuple); slotDesc.setIsMaterialized(true); slotDesc.setType(column.getType()); slotDesc.setColumn(column); slotDesc.setIsNullable(column.isAllowNull()); slotDesc.setAutoInc(column.isAutoInc()); } HiveTableSink sink = new HiveTableSink(hiveTableSink.getTargetTable()); rootFragment.setSink(sink); return rootFragment; } @Override public PlanFragment visitPhysicalFileSink(PhysicalFileSink<? extends Plan> fileSink, PlanTranslatorContext context) { PlanFragment sinkFragment = fileSink.child().accept(this, context); OutFileClause outFile = new OutFileClause( fileSink.getFilePath(), fileSink.getFormat(), fileSink.getProperties() ); List<Expr> outputExprs = Lists.newArrayList(); fileSink.getOutput().stream().map(Slot::getExprId) .forEach(exprId -> outputExprs.add(context.findSlotRef(exprId))); sinkFragment.setOutputExprs(outputExprs); List<String> labels = fileSink.getOutput().stream().map(NamedExpression::getName).collect(Collectors.toList()); try { outFile.analyze(null, outputExprs, labels); } catch (Exception e) { throw new AnalysisException(e.getMessage(), e.getCause()); } ResultFileSink resultFileSink = new ResultFileSink(sinkFragment.getPlanRoot().getId(), outFile, (ArrayList<String>) labels); sinkFragment.setSink(resultFileSink); if (fileSink.requestProperties(context.getConnectContext()).equals(PhysicalProperties.GATHER)) { return sinkFragment; } else { TupleDescriptor fileStatusDesc = ResultFileSink.constructFileStatusTupleDesc(context.getDescTable()); ExchangeNode exchangeNode = new ExchangeNode(context.nextPlanNodeId(), sinkFragment.getPlanRoot()); exchangeNode.setPartitionType(TPartitionType.UNPARTITIONED); exchangeNode.setNumInstances(1); TResultSinkType resultSinkType = context.getConnectContext() != null ? 
context.getConnectContext().getResultSinkType() : null; ResultSink resultSink = new ResultSink(exchangeNode.getId(), resultSinkType); PlanFragment topFragment = new PlanFragment(context.nextFragmentId(), exchangeNode, DataPartition.UNPARTITIONED); topFragment.addChild(sinkFragment); topFragment.setSink(resultSink); context.addPlanFragment(topFragment); DataStreamSink streamSink = new DataStreamSink(exchangeNode.getId()); streamSink.setOutputPartition(DataPartition.UNPARTITIONED); resultFileSink.resetByDataStreamSink(streamSink); resultFileSink.setOutputTupleId(fileStatusDesc.getId()); sinkFragment.setDestination(exchangeNode); exchangeNode.resetTupleIds(Lists.newArrayList(fileStatusDesc.getId())); topFragment.resetOutputExprs(fileStatusDesc); return topFragment; } } /* ******************************************************************************************** * scan Node, in lexicographical order * ******************************************************************************************** */ @Override public PlanFragment visitPhysicalFileScan(PhysicalFileScan fileScan, PlanTranslatorContext context) { List<Slot> slots = fileScan.getOutput(); ExternalTable table = fileScan.getTable(); TupleDescriptor tupleDescriptor = generateTupleDesc(slots, table, context); ScanNode scanNode; if (table instanceof HMSExternalTable) { switch (((HMSExternalTable) table).getDlaType()) { case HUDI: scanNode = new HudiScanNode(context.nextPlanNodeId(), tupleDescriptor, false); break; case ICEBERG: scanNode = new IcebergScanNode(context.nextPlanNodeId(), tupleDescriptor, false); break; case HIVE: scanNode = new HiveScanNode(context.nextPlanNodeId(), tupleDescriptor, false); HiveScanNode hiveScanNode = (HiveScanNode) scanNode; hiveScanNode.setSelectedPartitions(fileScan.getSelectedPartitions()); if (fileScan.getTableSample().isPresent()) { hiveScanNode.setTableSample(new TableSample(fileScan.getTableSample().get().isPercent, fileScan.getTableSample().get().sampleValue, fileScan.getTableSample().get().seek)); } break; default: throw new RuntimeException("do not support DLA type " + ((HMSExternalTable) table).getDlaType()); } } else if (table instanceof IcebergExternalTable) { scanNode = new IcebergScanNode(context.nextPlanNodeId(), tupleDescriptor, false); } else if (table instanceof PaimonExternalTable) { scanNode = new PaimonScanNode(context.nextPlanNodeId(), tupleDescriptor, false); } else if (table instanceof TrinoConnectorExternalTable) { scanNode = new TrinoConnectorScanNode(context.nextPlanNodeId(), tupleDescriptor, false); } else if (table instanceof MaxComputeExternalTable) { scanNode = new MaxComputeScanNode(context.nextPlanNodeId(), tupleDescriptor, false); } else { throw new RuntimeException("do not support table type " + table.getType()); } scanNode.setNereidsId(fileScan.getId()); scanNode.addConjuncts(translateToLegacyConjuncts(fileScan.getConjuncts())); scanNode.setPushDownAggNoGrouping(context.getRelationPushAggOp(fileScan.getRelationId())); TableName tableName = new TableName(null, "", ""); TableRef ref = new TableRef(tableName, null, null); BaseTableRef tableRef = new BaseTableRef(ref, table, tableName); tupleDescriptor.setRef(tableRef); if (fileScan.getStats() != null) { scanNode.setCardinality((long) fileScan.getStats().getRowCount()); } Utils.execWithUncheckedException(scanNode::init); context.addScanNode(scanNode); ScanNode finalScanNode = scanNode; context.getRuntimeTranslator().ifPresent( runtimeFilterGenerator -> runtimeFilterGenerator.getContext().getTargetListByScan(fileScan).forEach( 
expr -> runtimeFilterGenerator.translateRuntimeFilterTarget(expr, finalScanNode, context) ) ); Utils.execWithUncheckedException(scanNode::finalizeForNereids); DataPartition dataPartition = DataPartition.RANDOM; PlanFragment planFragment = createPlanFragment(scanNode, dataPartition, fileScan); context.addPlanFragment(planFragment); updateLegacyPlanIdToPhysicalPlan(planFragment.getPlanRoot(), fileScan); return planFragment; } @Override public PlanFragment visitPhysicalEmptyRelation(PhysicalEmptyRelation emptyRelation, PlanTranslatorContext context) { List<Slot> output = emptyRelation.getOutput(); TupleDescriptor tupleDescriptor = generateTupleDesc(output, null, context); for (Slot slot : output) { SlotRef slotRef = context.findSlotRef(slot.getExprId()); slotRef.setLabel(slot.getName()); } ArrayList<TupleId> tupleIds = new ArrayList<>(); tupleIds.add(tupleDescriptor.getId()); EmptySetNode emptySetNode = new EmptySetNode(context.nextPlanNodeId(), tupleIds); emptySetNode.setNereidsId(emptyRelation.getId()); PlanFragment planFragment = createPlanFragment(emptySetNode, DataPartition.UNPARTITIONED, emptyRelation); context.addPlanFragment(planFragment); updateLegacyPlanIdToPhysicalPlan(planFragment.getPlanRoot(), emptyRelation); return planFragment; } @Override public PlanFragment visitPhysicalEsScan(PhysicalEsScan esScan, PlanTranslatorContext context) { List<Slot> slots = esScan.getOutput(); ExternalTable table = esScan.getTable(); TupleDescriptor tupleDescriptor = generateTupleDesc(slots, table, context); EsScanNode esScanNode = new EsScanNode(context.nextPlanNodeId(), tupleDescriptor, true); esScanNode.setNereidsId(esScan.getId()); esScanNode.addConjuncts(translateToLegacyConjuncts(esScan.getConjuncts())); Utils.execWithUncheckedException(esScanNode::init); context.addScanNode(esScanNode); context.getRuntimeTranslator().ifPresent( runtimeFilterGenerator -> runtimeFilterGenerator.getContext().getTargetListByScan(esScan).forEach( expr -> runtimeFilterGenerator.translateRuntimeFilterTarget(expr, esScanNode, context) ) ); Utils.execWithUncheckedException(esScanNode::finalizeForNereids); DataPartition dataPartition = DataPartition.RANDOM; PlanFragment planFragment = new PlanFragment(context.nextFragmentId(), esScanNode, dataPartition); context.addPlanFragment(planFragment); updateLegacyPlanIdToPhysicalPlan(planFragment.getPlanRoot(), esScan); return planFragment; } @Override public PlanFragment visitPhysicalJdbcScan(PhysicalJdbcScan jdbcScan, PlanTranslatorContext context) { List<Slot> slots = jdbcScan.getOutput(); TableIf table = jdbcScan.getTable(); TupleDescriptor tupleDescriptor = generateTupleDesc(slots, table, context); JdbcScanNode jdbcScanNode = new JdbcScanNode(context.nextPlanNodeId(), tupleDescriptor, table instanceof JdbcExternalTable); jdbcScanNode.setNereidsId(jdbcScan.getId()); jdbcScanNode.addConjuncts(translateToLegacyConjuncts(jdbcScan.getConjuncts())); Utils.execWithUncheckedException(jdbcScanNode::init); context.addScanNode(jdbcScanNode); context.getRuntimeTranslator().ifPresent( runtimeFilterGenerator -> runtimeFilterGenerator.getContext().getTargetListByScan(jdbcScan).forEach( expr -> runtimeFilterGenerator.translateRuntimeFilterTarget(expr, jdbcScanNode, context) ) ); Utils.execWithUncheckedException(jdbcScanNode::finalizeForNereids); DataPartition dataPartition = DataPartition.RANDOM; PlanFragment planFragment = new PlanFragment(context.nextFragmentId(), jdbcScanNode, dataPartition); context.addPlanFragment(planFragment); 
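// Editorial note: the scan translators in this section all follow the same shape: generate a TupleDescriptor for the output slots, build the ScanNode, translate the pushed-down conjuncts, register runtime-filter targets, call finalizeForNereids, and wrap the node in a RANDOM-partitioned PlanFragment; the JDBC scan above is one instance of that pattern.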
updateLegacyPlanIdToPhysicalPlan(planFragment.getPlanRoot(), jdbcScan); return planFragment; } @Override public PlanFragment visitPhysicalOdbcScan(PhysicalOdbcScan odbcScan, PlanTranslatorContext context) { List<Slot> slots = odbcScan.getOutput(); TableIf table = odbcScan.getTable(); TupleDescriptor tupleDescriptor = generateTupleDesc(slots, table, context); OdbcScanNode odbcScanNode = new OdbcScanNode(context.nextPlanNodeId(), tupleDescriptor, (OdbcTable) table); odbcScanNode.setNereidsId(odbcScan.getId()); odbcScanNode.addConjuncts(translateToLegacyConjuncts(odbcScan.getConjuncts())); Utils.execWithUncheckedException(odbcScanNode::init); context.addScanNode(odbcScanNode); context.getRuntimeTranslator().ifPresent( runtimeFilterGenerator -> runtimeFilterGenerator.getContext().getTargetListByScan(odbcScan).forEach( expr -> runtimeFilterGenerator.translateRuntimeFilterTarget(expr, odbcScanNode, context) ) ); Utils.execWithUncheckedException(odbcScanNode::finalizeForNereids); DataPartition dataPartition = DataPartition.RANDOM; PlanFragment planFragment = new PlanFragment(context.nextFragmentId(), odbcScanNode, dataPartition); context.addPlanFragment(planFragment); updateLegacyPlanIdToPhysicalPlan(planFragment.getPlanRoot(), odbcScan); return planFragment; } @Override public PlanFragment visitPhysicalOlapScan(PhysicalOlapScan olapScan, PlanTranslatorContext context) { List<Slot> slots = olapScan.getOutput(); OlapTable olapTable = olapScan.getTable(); TupleDescriptor tupleDescriptor = generateTupleDesc(slots, olapTable, context); if (olapScan.getSelectedIndexId() != olapScan.getTable().getBaseIndexId()) { generateTupleDesc(olapScan.getBaseOutputs(), olapTable, context); } OlapScanNode olapScanNode = new OlapScanNode(context.nextPlanNodeId(), tupleDescriptor, "OlapScanNode"); olapScanNode.setNereidsId(olapScan.getId()); if (olapScan.getStats() != null) { if (context.getSessionVariable() != null && context.getSessionVariable().forbidUnknownColStats) { for (int i = 0; i < slots.size(); i++) { SlotReference slot = (SlotReference) slots.get(i); boolean inVisibleCol = slot.getColumn().isPresent() && StatisticConstants.shouldIgnoreCol(olapTable, slot.getColumn().get()); if (olapScan.getStats().findColumnStatistics(slot).isUnKnown() && !isComplexDataType(slot.getDataType()) && !StatisticConstants.isSystemTable(olapTable) && !inVisibleCol) { context.addUnknownStatsColumn(olapScanNode, tupleDescriptor.getSlots().get(i).getId()); } } } } TableName tableName = new TableName(null, "", ""); TableRef ref = new TableRef(tableName, null, null); BaseTableRef tableRef = new BaseTableRef(ref, olapTable, tableName); tupleDescriptor.setRef(tableRef); olapScanNode.setSelectedPartitionIds(olapScan.getSelectedPartitionIds()); olapScanNode.setSampleTabletIds(olapScan.getSelectedTabletIds()); if (olapScan.getTableSample().isPresent()) { olapScanNode.setTableSample(new TableSample(olapScan.getTableSample().get().isPercent, olapScan.getTableSample().get().sampleValue, olapScan.getTableSample().get().seek)); } switch (olapScan.getTable().getKeysType()) { case AGG_KEYS: case UNIQUE_KEYS: case DUP_KEYS: PreAggStatus preAgg = olapScan.getPreAggStatus(); olapScanNode.setSelectedIndexInfo(olapScan.getSelectedIndexId(), preAgg.isOn(), preAgg.getOffReason()); break; default: throw new RuntimeException("Not supported key type: " + olapScan.getTable().getKeysType()); } Utils.execWithUncheckedException(olapScanNode::init); context.addScanNode(olapScanNode); context.getRuntimeTranslator().ifPresent( runtimeFilterTranslator -> 
runtimeFilterTranslator.getContext().getTargetListByScan(olapScan) .forEach(expr -> runtimeFilterTranslator.translateRuntimeFilterTarget( expr, olapScanNode, context) ) ); olapScanNode.setPushDownAggNoGrouping(context.getRelationPushAggOp(olapScan.getRelationId())); if (context.getTopnFilterContext().isTopnFilterTarget(olapScan)) { olapScanNode.setUseTopnOpt(true); context.getTopnFilterContext().addLegacyTarget(olapScan, olapScanNode); } olapScanNode.finalizeForNereids(); DataPartition dataPartition = DataPartition.RANDOM; if (olapScan.getDistributionSpec() instanceof DistributionSpecHash) { DistributionSpecHash distributionSpecHash = (DistributionSpecHash) olapScan.getDistributionSpec(); List<Expr> partitionExprs = distributionSpecHash.getOrderedShuffledColumns().stream() .map(context::findSlotRef).collect(Collectors.toList()); dataPartition = new DataPartition(TPartitionType.HASH_PARTITIONED, partitionExprs); } PlanFragment planFragment = createPlanFragment(olapScanNode, dataPartition, olapScan); context.addPlanFragment(planFragment); updateLegacyPlanIdToPhysicalPlan(planFragment.getPlanRoot(), olapScan); return planFragment; } @Override public PlanFragment visitPhysicalDeferMaterializeOlapScan( PhysicalDeferMaterializeOlapScan deferMaterializeOlapScan, PlanTranslatorContext context) { PlanFragment planFragment = visitPhysicalOlapScan(deferMaterializeOlapScan.getPhysicalOlapScan(), context); OlapScanNode olapScanNode = (OlapScanNode) planFragment.getPlanRoot(); if (context.getTopnFilterContext().isTopnFilterTarget(deferMaterializeOlapScan)) { olapScanNode.setUseTopnOpt(true); context.getTopnFilterContext().addLegacyTarget(deferMaterializeOlapScan, olapScanNode); } TupleDescriptor tupleDescriptor = context.getTupleDesc(olapScanNode.getTupleId()); for (SlotDescriptor slotDescriptor : tupleDescriptor.getSlots()) { if (deferMaterializeOlapScan.getDeferMaterializeSlotIds() .contains(context.findExprId(slotDescriptor.getId()))) { slotDescriptor.setNeedMaterialize(false); } } context.createSlotDesc(tupleDescriptor, deferMaterializeOlapScan.getColumnIdSlot()); return planFragment; } @Override public PlanFragment visitPhysicalOneRowRelation(PhysicalOneRowRelation oneRowRelation, PlanTranslatorContext context) { List<Slot> slots = oneRowRelation.getLogicalProperties().getOutput(); TupleDescriptor oneRowTuple = generateTupleDesc(slots, null, context); List<Expr> legacyExprs = oneRowRelation.getProjects() .stream() .map(expr -> ExpressionTranslator.translate(expr, context)) .collect(Collectors.toList()); for (int i = 0; i < legacyExprs.size(); i++) { SlotDescriptor slotDescriptor = oneRowTuple.getSlots().get(i); Expr expr = legacyExprs.get(i); slotDescriptor.setSourceExpr(expr); slotDescriptor.setIsNullable(slots.get(i).nullable()); } UnionNode unionNode = new UnionNode(context.nextPlanNodeId(), oneRowTuple.getId()); unionNode.setNereidsId(oneRowRelation.getId()); unionNode.setCardinality(1L); unionNode.addConstExprList(legacyExprs); unionNode.finalizeForNereids(oneRowTuple.getSlots(), new ArrayList<>()); PlanFragment planFragment = createPlanFragment(unionNode, DataPartition.UNPARTITIONED, oneRowRelation); context.addPlanFragment(planFragment); updateLegacyPlanIdToPhysicalPlan(planFragment.getPlanRoot(), oneRowRelation); return planFragment; } @Override public PlanFragment visitPhysicalSchemaScan(PhysicalSchemaScan schemaScan, PlanTranslatorContext context) { TableIf table = schemaScan.getTable(); List<Slot> slots = ImmutableList.copyOf(schemaScan.getOutput()); TupleDescriptor tupleDescriptor = 
generateTupleDesc(slots, table, context); SchemaScanNode scanNode = null; if (BackendPartitionedSchemaScanNode.isBackendPartitionedSchemaTable( table.getName())) { scanNode = new BackendPartitionedSchemaScanNode(context.nextPlanNodeId(), tupleDescriptor); } else { scanNode = new SchemaScanNode(context.nextPlanNodeId(), tupleDescriptor); } scanNode.setNereidsId(schemaScan.getId()); SchemaScanNode finalScanNode = scanNode; context.getRuntimeTranslator().ifPresent( runtimeFilterGenerator -> runtimeFilterGenerator.getContext().getTargetListByScan(schemaScan) .forEach(expr -> runtimeFilterGenerator .translateRuntimeFilterTarget(expr, finalScanNode, context) ) ); Utils.execWithUncheckedException(scanNode::finalizeForNereids); context.addScanNode(scanNode); PlanFragment planFragment = createPlanFragment(scanNode, DataPartition.RANDOM, schemaScan); context.addPlanFragment(planFragment); updateLegacyPlanIdToPhysicalPlan(planFragment.getPlanRoot(), schemaScan); return planFragment; } @Override public PlanFragment visitPhysicalTVFRelation(PhysicalTVFRelation tvfRelation, PlanTranslatorContext context) { List<Slot> slots = tvfRelation.getLogicalProperties().getOutput(); TupleDescriptor tupleDescriptor = generateTupleDesc(slots, tvfRelation.getFunction().getTable(), context); TableValuedFunctionIf catalogFunction = tvfRelation.getFunction().getCatalogFunction(); ScanNode scanNode = catalogFunction.getScanNode(context.nextPlanNodeId(), tupleDescriptor); scanNode.setNereidsId(tvfRelation.getId()); Utils.execWithUncheckedException(scanNode::init); context.getRuntimeTranslator().ifPresent( runtimeFilterGenerator -> runtimeFilterGenerator.getContext().getTargetListByScan(tvfRelation) .forEach(expr -> runtimeFilterGenerator.translateRuntimeFilterTarget(expr, scanNode, context) ) ); Utils.execWithUncheckedException(scanNode::finalizeForNereids); context.addScanNode(scanNode); for (Slot slot : slots) { String tableColumnName = TableValuedFunctionIf.TVF_TABLE_PREFIX + tvfRelation.getFunction().getName() + "." + slot.getName(); context.findSlotRef(slot.getExprId()).setLabel(tableColumnName); } PlanFragment planFragment = createPlanFragment(scanNode, DataPartition.RANDOM, tvfRelation); context.addPlanFragment(planFragment); updateLegacyPlanIdToPhysicalPlan(planFragment.getPlanRoot(), tvfRelation); return planFragment; } /* ******************************************************************************************** * other Node, in lexicographical order, ignore algorithm name. for example, HashAggregate -> Aggregate * ******************************************************************************************** */ /** * Translate Agg. */ @Override public PlanFragment visitPhysicalHashAggregate( PhysicalHashAggregate<? 
extends Plan> aggregate, PlanTranslatorContext context) { PlanFragment inputPlanFragment = aggregate.child(0).accept(this, context); List<List<Expr>> distributeExprLists = getDistributeExprs(aggregate.child(0)); List<Expression> groupByExpressions = aggregate.getGroupByExpressions(); List<NamedExpression> outputExpressions = aggregate.getOutputExpressions(); List<SlotReference> groupSlots = collectGroupBySlots(groupByExpressions, outputExpressions); ArrayList<Expr> execGroupingExpressions = groupByExpressions.stream() .map(e -> ExpressionTranslator.translate(e, context)) .collect(Collectors.toCollection(ArrayList::new)); List<Slot> aggFunctionOutput = Lists.newArrayList(); List<AggregateExpression> aggregateExpressionList = outputExpressions.stream() .filter(o -> o.anyMatch(AggregateExpression.class::isInstance)) .peek(o -> aggFunctionOutput.add(o.toSlot())) .map(o -> o.<Set<AggregateExpression>>collect(AggregateExpression.class::isInstance)) .flatMap(Set::stream) .collect(Collectors.toList()); ArrayList<FunctionCallExpr> execAggregateFunctions = aggregateExpressionList.stream() .map(aggregateFunction -> (FunctionCallExpr) ExpressionTranslator.translate(aggregateFunction, context)) .collect(Collectors.toCollection(ArrayList::new)); List<Slot> slotList = Lists.newArrayList(); TupleDescriptor outputTupleDesc; slotList.addAll(groupSlots); slotList.addAll(aggFunctionOutput); outputTupleDesc = generateTupleDesc(slotList, null, context); List<Integer> aggFunOutputIds = ImmutableList.of(); if (!aggFunctionOutput.isEmpty()) { aggFunOutputIds = outputTupleDesc .getSlots() .subList(groupSlots.size(), outputTupleDesc.getSlots().size()) .stream() .map(slot -> slot.getId().asInt()) .collect(ImmutableList.toImmutableList()); } boolean isPartial = aggregate.getAggregateParam().aggMode.productAggregateBuffer; AggregateInfo aggInfo = AggregateInfo.create(execGroupingExpressions, execAggregateFunctions, aggFunOutputIds, isPartial, outputTupleDesc, outputTupleDesc, aggregate.getAggPhase().toExec()); AggregationNode aggregationNode = new AggregationNode(context.nextPlanNodeId(), inputPlanFragment.getPlanRoot(), aggInfo); aggregationNode.setChildrenDistributeExprLists(distributeExprLists); aggregationNode.setNereidsId(aggregate.getId()); if (!aggregate.getAggMode().isFinalPhase) { aggregationNode.unsetNeedsFinalize(); } switch (aggregate.getAggPhase()) { case LOCAL: break; case DISTINCT_LOCAL: aggregationNode.setIntermediateTuple(); break; case GLOBAL: case DISTINCT_GLOBAL: break; default: throw new RuntimeException("Unsupported agg phase: " + aggregate.getAggPhase()); } PhysicalHashAggregate firstAggregateInFragment = context.getFirstAggregateInFragment(inputPlanFragment); if (firstAggregateInFragment == null) { context.setFirstAggregateInFragment(inputPlanFragment, aggregate); } PlanNode leftMostNode = inputPlanFragment.getPlanRoot(); while (leftMostNode.getChildren().size() != 0 && !(leftMostNode instanceof ExchangeNode)) { leftMostNode = leftMostNode.getChild(0); } if (leftMostNode instanceof OlapScanNode && inputPlanFragment.getDataPartition().getType() != TPartitionType.RANDOM && aggregate.getAggregateParam().aggMode != AggMode.INPUT_TO_BUFFER) { inputPlanFragment.setHasColocatePlanNode(true); aggregationNode.setColocate(true); } setPlanRoot(inputPlanFragment, aggregationNode, aggregate); if (aggregate.getStats() != null) { aggregationNode.setCardinality((long) aggregate.getStats().getRowCount()); } updateLegacyPlanIdToPhysicalPlan(inputPlanFragment.getPlanRoot(), aggregate); return inputPlanFragment; } 
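// Illustrative sketch (hypothetical table/column names, not from the source): for a two-phase aggregate such as SELECT k1, count(v1) FROM t GROUP BY k1, the LOCAL phase runs with aggMode INPUT_TO_BUFFER (isFinalPhase is false, so needsFinalize is unset above) and produces partial aggregate buffers, while the GLOBAL phase merges those buffers into final results; both phases share the output tuple layout built above, with group-by slots first and aggregate-function slots after them.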
@Override public PlanFragment visitPhysicalStorageLayerAggregate( PhysicalStorageLayerAggregate storageLayerAggregate, PlanTranslatorContext context) { Preconditions.checkState((storageLayerAggregate.getRelation() instanceof PhysicalOlapScan || storageLayerAggregate.getRelation() instanceof PhysicalFileScan), "PhysicalStorageLayerAggregate only supports PhysicalOlapScan and PhysicalFileScan: " + storageLayerAggregate.getRelation().getClass().getName()); TPushAggOp pushAggOp; switch (storageLayerAggregate.getAggOp()) { case COUNT: pushAggOp = TPushAggOp.COUNT; break; case COUNT_ON_MATCH: pushAggOp = TPushAggOp.COUNT_ON_INDEX; break; case MIN_MAX: pushAggOp = TPushAggOp.MINMAX; break; case MIX: pushAggOp = TPushAggOp.MIX; break; default: throw new AnalysisException("Unsupported storage layer aggregate: " + storageLayerAggregate.getAggOp()); } context.setRelationPushAggOp( storageLayerAggregate.getRelation().getRelationId(), pushAggOp); PlanFragment planFragment = storageLayerAggregate.getRelation().accept(this, context); updateLegacyPlanIdToPhysicalPlan(planFragment.getPlanRoot(), storageLayerAggregate); return planFragment; } @Override public PlanFragment visitPhysicalAssertNumRows(PhysicalAssertNumRows<? extends Plan> assertNumRows, PlanTranslatorContext context) { PlanFragment currentFragment = assertNumRows.child().accept(this, context); List<List<Expr>> distributeExprLists = getDistributeExprs(assertNumRows.child()); TupleDescriptor tupleDescriptor = context.generateTupleDesc(); AssertNumRowsNode assertNumRowsNode = new AssertNumRowsNode(context.nextPlanNodeId(), currentFragment.getPlanRoot(), ExpressionTranslator.translateAssert(assertNumRows.getAssertNumRowsElement()), true, tupleDescriptor); assertNumRowsNode.setChildrenDistributeExprLists(distributeExprLists); assertNumRowsNode.setNereidsId(assertNumRows.getId()); List<TupleDescriptor> childTuples = context.getTupleDesc(currentFragment.getPlanRoot()); List<SlotDescriptor> childSlotDescriptors = childTuples.stream() .map(TupleDescriptor::getSlots) .flatMap(Collection::stream) .collect(Collectors.toList()); Map<ExprId, SlotReference> childOutputMap = Maps.newHashMap(); assertNumRows.child().getOutput().stream() .map(SlotReference.class::cast) .forEach(s -> childOutputMap.put(s.getExprId(), s)); List<SlotDescriptor> slotDescriptors = Lists.newArrayList(); for (SlotDescriptor slot : childSlotDescriptors) { SlotReference sf = childOutputMap.get(context.findExprId(slot.getId())); SlotDescriptor sd = context.createSlotDesc(tupleDescriptor, sf, slot.getParent().getTable()); slotDescriptors.add(sd); } slotDescriptors.forEach(sd -> sd.setIsNullable(true)); addPlanRoot(currentFragment, assertNumRowsNode, assertNumRows); return currentFragment; } /** * NOTICE: Must translate the left child first, since it is the producer of the consumer. */ @Override public PlanFragment visitPhysicalCTEAnchor(PhysicalCTEAnchor<? extends Plan, ?
extends Plan> cteAnchor, PlanTranslatorContext context) { cteAnchor.child(0).accept(this, context); return cteAnchor.child(1).accept(this, context); } @Override public PlanFragment visitPhysicalCTEConsumer(PhysicalCTEConsumer cteConsumer, PlanTranslatorContext context) { CTEId cteId = cteConsumer.getCteId(); MultiCastPlanFragment multiCastFragment = (MultiCastPlanFragment) context.getCteProduceFragments().get(cteId); Preconditions.checkState(multiCastFragment.getSink() instanceof MultiCastDataSink, "invalid multiCastFragment"); MultiCastDataSink multiCastDataSink = (MultiCastDataSink) multiCastFragment.getSink(); Preconditions.checkState(multiCastDataSink != null, "invalid multiCastDataSink"); PhysicalCTEProducer<?> cteProducer = context.getCteProduceMap().get(cteId); Preconditions.checkState(cteProducer != null, "invalid cteProducer"); context.getCteConsumerMap().put(cteId, cteConsumer); DataStreamSink streamSink = new DataStreamSink(); streamSink.setFragment(multiCastFragment); multiCastDataSink.getDataStreamSinks().add(streamSink); multiCastDataSink.getDestinations().add(Lists.newArrayList()); TupleDescriptor tupleDescriptor = null; for (Slot producerSlot : cteProducer.getOutput()) { Slot consumerSlot = cteConsumer.getProducerToConsumerSlotMap().get(producerSlot); SlotRef slotRef = context.findSlotRef(producerSlot.getExprId()); tupleDescriptor = slotRef.getDesc().getParent(); context.addExprIdSlotRefPair(consumerSlot.getExprId(), slotRef); } CTEScanNode cteScanNode = new CTEScanNode(tupleDescriptor); context.getRuntimeTranslator().ifPresent(runtimeFilterTranslator -> runtimeFilterTranslator.getContext().getTargetListByScan(cteConsumer).forEach( expr -> runtimeFilterTranslator.translateRuntimeFilterTarget(expr, cteScanNode, context))); context.getCteScanNodeMap().put(multiCastFragment.getFragmentId(), cteScanNode); return multiCastFragment; } @Override public PlanFragment visitPhysicalCTEProducer(PhysicalCTEProducer<? extends Plan> cteProducer, PlanTranslatorContext context) { PlanFragment child = cteProducer.child().accept(this, context); CTEId cteId = cteProducer.getCteId(); context.getPlanFragments().remove(child); MultiCastPlanFragment multiCastPlanFragment = new MultiCastPlanFragment(child); MultiCastDataSink multiCastDataSink = new MultiCastDataSink(); multiCastPlanFragment.setSink(multiCastDataSink); List<Expr> outputs = cteProducer.getOutput().stream() .map(e -> ExpressionTranslator.translate(e, context)) .collect(Collectors.toList()); multiCastPlanFragment.setOutputExprs(outputs); context.getCteProduceFragments().put(cteId, multiCastPlanFragment); context.getCteProduceMap().put(cteId, cteProducer); if (context.getRuntimeTranslator().isPresent()) { context.getRuntimeTranslator().get().getContext().getCteProduceMap().put(cteId, cteProducer); } context.getPlanFragments().add(multiCastPlanFragment); return child; } @Override public PlanFragment visitPhysicalFilter(PhysicalFilter<? 
extends Plan> filter, PlanTranslatorContext context) { if (filter.child(0) instanceof AbstractPhysicalJoin) { AbstractPhysicalJoin<?, ?> join = (AbstractPhysicalJoin<?, ?>) filter.child(); join.addFilterConjuncts(filter.getConjuncts()); } PlanFragment inputFragment = filter.child(0).accept(this, context); if (inputFragment instanceof MultiCastPlanFragment) { MultiCastDataSink multiCastDataSink = (MultiCastDataSink) inputFragment.getSink(); DataStreamSink dataStreamSink = multiCastDataSink.getDataStreamSinks().get( multiCastDataSink.getDataStreamSinks().size() - 1); filter.getConjuncts().stream() .map(e -> ExpressionTranslator.translate(e, context)) .forEach(dataStreamSink::addConjunct); return inputFragment; } PlanNode planNode = inputFragment.getPlanRoot(); Plan child = filter.child(); while (child instanceof PhysicalLimit) { child = ((PhysicalLimit<?>) child).child(); } if (planNode instanceof ExchangeNode || planNode instanceof SortNode || planNode instanceof UnionNode || (child instanceof PhysicalProject && !((PhysicalProject<?>) child).hasPushedDownToProjectionFunctions())) { SelectNode selectNode = new SelectNode(context.nextPlanNodeId(), planNode); selectNode.setNereidsId(filter.getId()); addConjunctsToPlanNode(filter, selectNode, context); addPlanRoot(inputFragment, selectNode, filter); } else { if (!(filter.child(0) instanceof AbstractPhysicalJoin)) { addConjunctsToPlanNode(filter, planNode, context); updateLegacyPlanIdToPhysicalPlan(inputFragment.getPlanRoot(), filter); } } if (filter.getStats() != null) { inputFragment.getPlanRoot().setCardinalityAfterFilter((long) filter.getStats().getRowCount()); } return inputFragment; } @Override public PlanFragment visitPhysicalGenerate(PhysicalGenerate<? extends Plan> generate, PlanTranslatorContext context) { PlanFragment currentFragment = generate.child().accept(this, context); ArrayList<Expr> functionCalls = generate.getGenerators().stream() .map(e -> ExpressionTranslator.translate(e, context)) .collect(Collectors.toCollection(ArrayList::new)); TupleDescriptor tupleDescriptor = generateTupleDesc(generate.getGeneratorOutput(), null, context); List<TupleId> childOutputTupleIds = currentFragment.getPlanRoot().getOutputTupleIds(); if (childOutputTupleIds == null || childOutputTupleIds.isEmpty()) { childOutputTupleIds = currentFragment.getPlanRoot().getTupleIds(); } List<SlotId> outputSlotIds = Stream.concat(childOutputTupleIds.stream(), Stream.of(tupleDescriptor.getId())) .map(id -> context.getTupleDesc(id).getSlots()) .flatMap(List::stream) .map(SlotDescriptor::getId) .collect(Collectors.toList()); TableFunctionNode tableFunctionNode = new TableFunctionNode(context.nextPlanNodeId(), currentFragment.getPlanRoot(), tupleDescriptor.getId(), functionCalls, outputSlotIds); tableFunctionNode.setNereidsId(generate.getId()); addPlanRoot(currentFragment, tableFunctionNode, generate); return currentFragment; } /** * The contract of hash join node with BE * 1. hash join contains 3 types of predicates: * a. equal join conjuncts * b. other join conjuncts * c. other predicates (denoted by filter conjuncts in the rest of the comments) * <p> * 2. hash join contains 3 tuple descriptors * a. input tuple descriptors, corresponding to the left child output and right child output. * If its column is selected, it will be displayed in explain by `tuple ids`. * For example, in select L.* from L join R on ..., because no column from R is selected, tuple ids only * contain the output tuple of L. * Equal join conjuncts are bound on input tuple descriptors.
* <p> * b. intermediate tuple. * This tuple describes the schema of the output block after evaluating equal join conjuncts * and other join conjuncts. * <p> * Other join conjuncts are currently bound on the intermediate tuple. There are some historical reasons, and they * should be bound on the input tuples in the future. * <p> * filter conjuncts will be evaluated on the intermediate tuple. That means the input block of filter is * described by the intermediate tuple, and hence filter conjuncts should be bound on the intermediate tuple. * <p> * In order to be compatible with the old version, the intermediate tuple is not pruned. For example, the intermediate * tuple contains all slots from both children. After probing the hash table, the BE does not need to * materialize all slots in the intermediate tuple. The slots in HashJoinNode.hashOutputSlotIds will be * materialized by the BE. If `hashOutputSlotIds` is empty, all slots will be materialized. * <p> * In case of outer join, the slots in the intermediate tuple should be set nullable. * For example, * select L.*, R.* from L left outer join R on ... * All slots from R in the intermediate tuple should be nullable. * <p> * c. output tuple * This describes the schema of the hash join output block. * 3. Intermediate tuple * For BE performance reasons, the slots in the intermediate tuple * depend on the join type and other join conjuncts. * In general, the intermediate tuple contains all slots of both children, except in one case: * for left-semi/left-anti (right-semi/right-anti) join without other join conjuncts, the intermediate tuple * only contains the left (right) child's output slots. * */ @Override public PlanFragment visitPhysicalHashJoin( PhysicalHashJoin<? extends Plan, ? extends Plan> hashJoin, PlanTranslatorContext context) { Preconditions.checkArgument(hashJoin.left() instanceof PhysicalPlan, "HashJoin's left child should be PhysicalPlan"); Preconditions.checkArgument(hashJoin.right() instanceof PhysicalPlan, "HashJoin's right child should be PhysicalPlan"); PhysicalHashJoin<PhysicalPlan, PhysicalPlan> physicalHashJoin = (PhysicalHashJoin<PhysicalPlan, PhysicalPlan>) hashJoin; PlanFragment rightFragment = hashJoin.child(1).accept(this, context); PlanFragment leftFragment = hashJoin.child(0).accept(this, context); List<List<Expr>> distributeExprLists = getDistributeExprs(physicalHashJoin.left(), physicalHashJoin.right()); if (JoinUtils.shouldNestedLoopJoin(hashJoin)) { throw new RuntimeException("Physical hash join could not execute without equal join condition."); } PlanNode leftPlanRoot = leftFragment.getPlanRoot(); PlanNode rightPlanRoot = rightFragment.getPlanRoot(); JoinType joinType = hashJoin.getJoinType(); List<Expr> execEqConjuncts = hashJoin.getHashJoinConjuncts().stream() .map(EqualPredicate.class::cast) .map(e -> JoinUtils.swapEqualToForChildrenOrder(e, hashJoin.left().getOutputSet())) .map(e -> ExpressionTranslator.translate(e, context)) .collect(Collectors.toList()); List<Expr> markConjuncts = ImmutableList.of(); boolean isHashJoinConjunctsEmpty = hashJoin.getHashJoinConjuncts().isEmpty(); boolean isMarkJoinConjunctsEmpty = hashJoin.getMarkJoinConjuncts().isEmpty(); JoinOperator joinOperator = JoinType.toJoinOperator(joinType); if (isHashJoinConjunctsEmpty) { Preconditions.checkState(!isMarkJoinConjunctsEmpty, "mark join conjuncts should not be empty."); markConjuncts = hashJoin.getMarkJoinConjuncts().stream() .map(EqualPredicate.class::cast) .map(e -> JoinUtils.swapEqualToForChildrenOrder(e, hashJoin.left().getOutputSet())) .map(e -> ExpressionTranslator.translate(e, context))
.collect(Collectors.toList()); if (joinOperator == JoinOperator.LEFT_ANTI_JOIN) { joinOperator = JoinOperator.NULL_AWARE_LEFT_ANTI_JOIN; } else if (joinOperator == JoinOperator.LEFT_SEMI_JOIN) { joinOperator = JoinOperator.NULL_AWARE_LEFT_SEMI_JOIN; } } HashJoinNode hashJoinNode = new HashJoinNode(context.nextPlanNodeId(), leftPlanRoot, rightPlanRoot, joinOperator, execEqConjuncts, Lists.newArrayList(), markConjuncts, null, null, null, hashJoin.isMarkJoin()); hashJoinNode.setNereidsId(hashJoin.getId()); hashJoinNode.setChildrenDistributeExprLists(distributeExprLists); hashJoinNode.setUseSpecificProjections(false); PlanFragment currentFragment = connectJoinNode(hashJoinNode, leftFragment, rightFragment, context, hashJoin); if (JoinUtils.shouldColocateJoin(physicalHashJoin)) { hashJoinNode.setColocate(true, ""); leftFragment.setHasColocatePlanNode(true); } else if (JoinUtils.shouldBroadcastJoin(physicalHashJoin)) { Preconditions.checkState(rightPlanRoot instanceof ExchangeNode, "right child of broadcast join must be ExchangeNode but it is " + rightFragment.getPlanRoot()); Preconditions.checkState(rightFragment.getChildren().size() == 1, "right child of broadcast join must have 1 child, but meet " + rightFragment.getChildren().size()); ((ExchangeNode) rightPlanRoot).setRightChildOfBroadcastHashJoin(true); hashJoinNode.setDistributionMode(DistributionMode.BROADCAST); } else if (JoinUtils.shouldBucketShuffleJoin(physicalHashJoin)) { hashJoinNode.setDistributionMode(DistributionMode.BUCKET_SHUFFLE); } else { hashJoinNode.setDistributionMode(DistributionMode.PARTITIONED); } List<TupleDescriptor> leftTuples = context.getTupleDesc(leftPlanRoot); List<SlotDescriptor> leftSlotDescriptors = leftTuples.stream() .map(TupleDescriptor::getSlots) .flatMap(Collection::stream) .collect(Collectors.toList()); List<TupleDescriptor> rightTuples = context.getTupleDesc(rightPlanRoot); List<SlotDescriptor> rightSlotDescriptors = rightTuples.stream() .map(TupleDescriptor::getSlots) .flatMap(Collection::stream) .collect(Collectors.toList()); Map<ExprId, SlotReference> outputSlotReferenceMap = Maps.newHashMap(); hashJoin.getOutput().stream() .map(SlotReference.class::cast) .forEach(s -> outputSlotReferenceMap.put(s.getExprId(), s)); List<SlotReference> outputSlotReferences = Stream.concat(leftTuples.stream(), rightTuples.stream()) .map(TupleDescriptor::getSlots) .flatMap(Collection::stream) .map(sd -> context.findExprId(sd.getId())) .map(outputSlotReferenceMap::get) .filter(Objects::nonNull) .collect(Collectors.toList()); Map<ExprId, SlotReference> hashOutputSlotReferenceMap = Maps.newHashMap(outputSlotReferenceMap); hashJoin.getOtherJoinConjuncts() .stream() .filter(e -> !(e.equals(BooleanLiteral.TRUE))) .flatMap(e -> e.getInputSlots().stream()) .map(SlotReference.class::cast) .forEach(s -> hashOutputSlotReferenceMap.put(s.getExprId(), s)); if (!isHashJoinConjunctsEmpty && !isMarkJoinConjunctsEmpty) { hashJoin.getMarkJoinConjuncts() .stream() .flatMap(e -> e.getInputSlots().stream()) .map(SlotReference.class::cast) .forEach(s -> hashOutputSlotReferenceMap.put(s.getExprId(), s)); } hashJoin.getFilterConjuncts().stream() .filter(e -> !(e.equals(BooleanLiteral.TRUE))) .flatMap(e -> e.getInputSlots().stream()) .map(SlotReference.class::cast) .forEach(s -> hashOutputSlotReferenceMap.put(s.getExprId(), s)); Map<ExprId, SlotReference> leftChildOutputMap = Maps.newHashMap(); hashJoin.child(0).getOutput().stream() .map(SlotReference.class::cast) .forEach(s -> leftChildOutputMap.put(s.getExprId(), s)); Map<ExprId, 
SlotReference> rightChildOutputMap = Maps.newHashMap(); hashJoin.child(1).getOutput().stream() .map(SlotReference.class::cast) .forEach(s -> rightChildOutputMap.put(s.getExprId(), s)); context.getRuntimeTranslator().ifPresent(runtimeFilterTranslator -> physicalHashJoin.getRuntimeFilters() .forEach(filter -> runtimeFilterTranslator.createLegacyRuntimeFilter(filter, hashJoinNode, context))); List<SlotDescriptor> leftIntermediateSlotDescriptor = Lists.newArrayList(); List<SlotDescriptor> rightIntermediateSlotDescriptor = Lists.newArrayList(); TupleDescriptor intermediateDescriptor = context.generateTupleDesc(); if (hashJoin.getOtherJoinConjuncts().isEmpty() && (isHashJoinConjunctsEmpty != isMarkJoinConjunctsEmpty) && (joinType == JoinType.LEFT_ANTI_JOIN || joinType == JoinType.LEFT_SEMI_JOIN || joinType == JoinType.NULL_AWARE_LEFT_ANTI_JOIN)) { for (SlotDescriptor leftSlotDescriptor : leftSlotDescriptors) { if (!leftSlotDescriptor.isMaterialized()) { continue; } SlotReference sf = leftChildOutputMap.get(context.findExprId(leftSlotDescriptor.getId())); SlotDescriptor sd; if (sf == null && leftSlotDescriptor.getColumn().getName().equals(Column.ROWID_COL)) { sd = context.getDescTable().copySlotDescriptor(intermediateDescriptor, leftSlotDescriptor); } else { sd = context.createSlotDesc(intermediateDescriptor, sf, leftSlotDescriptor.getParent().getTable()); if (hashOutputSlotReferenceMap.get(sf.getExprId()) != null) { hashJoinNode.addSlotIdToHashOutputSlotIds(leftSlotDescriptor.getId()); hashJoinNode.getHashOutputExprSlotIdMap().put(sf.getExprId(), leftSlotDescriptor.getId()); } } leftIntermediateSlotDescriptor.add(sd); } } else if (hashJoin.getOtherJoinConjuncts().isEmpty() && (isHashJoinConjunctsEmpty != isMarkJoinConjunctsEmpty) && (joinType == JoinType.RIGHT_ANTI_JOIN || joinType == JoinType.RIGHT_SEMI_JOIN)) { for (SlotDescriptor rightSlotDescriptor : rightSlotDescriptors) { if (!rightSlotDescriptor.isMaterialized()) { continue; } SlotReference sf = rightChildOutputMap.get(context.findExprId(rightSlotDescriptor.getId())); SlotDescriptor sd; if (sf == null && rightSlotDescriptor.getColumn().getName().equals(Column.ROWID_COL)) { sd = context.getDescTable().copySlotDescriptor(intermediateDescriptor, rightSlotDescriptor); } else { sd = context.createSlotDesc(intermediateDescriptor, sf, rightSlotDescriptor.getParent().getTable()); if (hashOutputSlotReferenceMap.get(sf.getExprId()) != null) { hashJoinNode.addSlotIdToHashOutputSlotIds(rightSlotDescriptor.getId()); hashJoinNode.getHashOutputExprSlotIdMap().put(sf.getExprId(), rightSlotDescriptor.getId()); } } rightIntermediateSlotDescriptor.add(sd); } } else { for (SlotDescriptor leftSlotDescriptor : leftSlotDescriptors) { if (!leftSlotDescriptor.isMaterialized()) { continue; } SlotReference sf = leftChildOutputMap.get(context.findExprId(leftSlotDescriptor.getId())); SlotDescriptor sd; if (sf == null && leftSlotDescriptor.getColumn().getName().equals(Column.ROWID_COL)) { sd = context.getDescTable().copySlotDescriptor(intermediateDescriptor, leftSlotDescriptor); } else { sd = context.createSlotDesc(intermediateDescriptor, sf, leftSlotDescriptor.getParent().getTable()); if (hashOutputSlotReferenceMap.get(sf.getExprId()) != null) { hashJoinNode.addSlotIdToHashOutputSlotIds(leftSlotDescriptor.getId()); hashJoinNode.getHashOutputExprSlotIdMap().put(sf.getExprId(), leftSlotDescriptor.getId()); } } leftIntermediateSlotDescriptor.add(sd); } for (SlotDescriptor rightSlotDescriptor : rightSlotDescriptors) { if (!rightSlotDescriptor.isMaterialized()) { 
continue; } SlotReference sf = rightChildOutputMap.get(context.findExprId(rightSlotDescriptor.getId())); SlotDescriptor sd; if (sf == null && rightSlotDescriptor.getColumn().getName().equals(Column.ROWID_COL)) { sd = context.getDescTable().copySlotDescriptor(intermediateDescriptor, rightSlotDescriptor); } else { sd = context.createSlotDesc(intermediateDescriptor, sf, rightSlotDescriptor.getParent().getTable()); if (hashOutputSlotReferenceMap.get(sf.getExprId()) != null) { hashJoinNode.addSlotIdToHashOutputSlotIds(rightSlotDescriptor.getId()); hashJoinNode.getHashOutputExprSlotIdMap().put(sf.getExprId(), rightSlotDescriptor.getId()); } } rightIntermediateSlotDescriptor.add(sd); } } if (hashJoin.getMarkJoinSlotReference().isPresent()) { SlotReference sf = hashJoin.getMarkJoinSlotReference().get(); outputSlotReferences.add(sf); context.createSlotDesc(intermediateDescriptor, sf); if (hashOutputSlotReferenceMap.get(sf.getExprId()) != null) { SlotRef markJoinSlotId = context.findSlotRef(sf.getExprId()); Preconditions.checkState(markJoinSlotId != null); hashJoinNode.addSlotIdToHashOutputSlotIds(markJoinSlotId.getSlotId()); hashJoinNode.getHashOutputExprSlotIdMap().put(sf.getExprId(), markJoinSlotId.getSlotId()); } } if (joinType == JoinType.LEFT_OUTER_JOIN || joinType == JoinType.FULL_OUTER_JOIN) { rightIntermediateSlotDescriptor.forEach(sd -> sd.setIsNullable(true)); } if (joinType == JoinType.RIGHT_OUTER_JOIN || joinType == JoinType.FULL_OUTER_JOIN) { leftIntermediateSlotDescriptor.forEach(sd -> sd.setIsNullable(true)); } List<Expr> otherJoinConjuncts = hashJoin.getOtherJoinConjuncts() .stream() .filter(e -> !(e.equals(BooleanLiteral.TRUE))) .map(e -> ExpressionTranslator.translate(e, context)) .collect(Collectors.toList()); hashJoin.getFilterConjuncts().stream() .filter(e -> !(e.equals(BooleanLiteral.TRUE))) .map(e -> ExpressionTranslator.translate(e, context)) .forEach(hashJoinNode::addConjunct); hashJoinNode.setOtherJoinConjuncts(otherJoinConjuncts); if (!isHashJoinConjunctsEmpty && !isMarkJoinConjunctsEmpty) { List<Expr> markJoinConjuncts = hashJoin.getMarkJoinConjuncts() .stream() .map(e -> ExpressionTranslator.translate(e, context)) .collect(Collectors.toList()); hashJoinNode.setMarkJoinConjuncts(markJoinConjuncts); } hashJoinNode.setvIntermediateTupleDescList(Lists.newArrayList(intermediateDescriptor)); if (hashJoin.isShouldTranslateOutput()) { List<Expr> srcToOutput = outputSlotReferences.stream() .map(e -> ExpressionTranslator.translate(e, context)) .collect(Collectors.toList()); TupleDescriptor outputDescriptor = context.generateTupleDesc(); outputSlotReferences.forEach(s -> context.createSlotDesc(outputDescriptor, s)); hashJoinNode.setOutputTupleDesc(outputDescriptor); hashJoinNode.setProjectList(srcToOutput); } if (hashJoin.getStats() != null) { hashJoinNode.setCardinality((long) hashJoin.getStats().getRowCount()); } updateLegacyPlanIdToPhysicalPlan(currentFragment.getPlanRoot(), hashJoin); return currentFragment; } @Override public PlanFragment visitPhysicalNestedLoopJoin( PhysicalNestedLoopJoin<? extends Plan, ? 
extends Plan> nestedLoopJoin, PlanTranslatorContext context) { PlanFragment rightFragment = nestedLoopJoin.child(1).accept(this, context); PlanFragment leftFragment = nestedLoopJoin.child(0).accept(this, context); List<List<Expr>> distributeExprLists = getDistributeExprs(nestedLoopJoin.child(0), nestedLoopJoin.child(1)); PlanNode leftFragmentPlanRoot = leftFragment.getPlanRoot(); PlanNode rightFragmentPlanRoot = rightFragment.getPlanRoot(); if (JoinUtils.shouldNestedLoopJoin(nestedLoopJoin)) { List<TupleDescriptor> leftTuples = context.getTupleDesc(leftFragmentPlanRoot); List<TupleDescriptor> rightTuples = context.getTupleDesc(rightFragmentPlanRoot); List<TupleId> tupleIds = Stream.concat(leftTuples.stream(), rightTuples.stream()) .map(TupleDescriptor::getId) .collect(Collectors.toList()); JoinType joinType = nestedLoopJoin.getJoinType(); NestedLoopJoinNode nestedLoopJoinNode = new NestedLoopJoinNode(context.nextPlanNodeId(), leftFragmentPlanRoot, rightFragmentPlanRoot, tupleIds, JoinType.toJoinOperator(joinType), null, null, null, nestedLoopJoin.isMarkJoin()); nestedLoopJoinNode.setUseSpecificProjections(false); nestedLoopJoinNode.setNereidsId(nestedLoopJoin.getId()); nestedLoopJoinNode.setChildrenDistributeExprLists(distributeExprLists); if (nestedLoopJoin.getStats() != null) { nestedLoopJoinNode.setCardinality((long) nestedLoopJoin.getStats().getRowCount()); } nestedLoopJoinNode.setChild(0, leftFragment.getPlanRoot()); nestedLoopJoinNode.setChild(1, rightFragment.getPlanRoot()); setPlanRoot(leftFragment, nestedLoopJoinNode, nestedLoopJoin); rightFragment.getPlanRoot().setCompactData(false); context.mergePlanFragment(rightFragment, leftFragment); for (PlanFragment rightChild : rightFragment.getChildren()) { leftFragment.addChild(rightChild); } context.getRuntimeTranslator().ifPresent(runtimeFilterTranslator -> { List<RuntimeFilter> filters = nestedLoopJoin.getRuntimeFilters(); filters.forEach(filter -> runtimeFilterTranslator .createLegacyRuntimeFilter(filter, nestedLoopJoinNode, context)); if (filters.stream().anyMatch(filter -> filter.getType() == TRuntimeFilterType.BITMAP)) { nestedLoopJoinNode.setOutputLeftSideOnly(true); } }); Map<ExprId, SlotReference> leftChildOutputMap = Maps.newHashMap(); nestedLoopJoin.child(0).getOutput().stream() .map(SlotReference.class::cast) .forEach(s -> leftChildOutputMap.put(s.getExprId(), s)); Map<ExprId, SlotReference> rightChildOutputMap = Maps.newHashMap(); nestedLoopJoin.child(1).getOutput().stream() .map(SlotReference.class::cast) .forEach(s -> rightChildOutputMap.put(s.getExprId(), s)); List<SlotDescriptor> leftIntermediateSlotDescriptor = Lists.newArrayList(); List<SlotDescriptor> rightIntermediateSlotDescriptor = Lists.newArrayList(); TupleDescriptor intermediateDescriptor = context.generateTupleDesc(); List<SlotDescriptor> leftSlotDescriptors = leftTuples.stream() .map(TupleDescriptor::getSlots) .flatMap(Collection::stream) .collect(Collectors.toList()); List<SlotDescriptor> rightSlotDescriptors = rightTuples.stream() .map(TupleDescriptor::getSlots) .flatMap(Collection::stream) .collect(Collectors.toList()); Map<ExprId, SlotReference> outputSlotReferenceMap = Maps.newHashMap(); nestedLoopJoin.getOutput().stream() .map(SlotReference.class::cast) .forEach(s -> outputSlotReferenceMap.put(s.getExprId(), s)); nestedLoopJoin.getFilterConjuncts().stream() .filter(e -> !(e.equals(BooleanLiteral.TRUE))) .flatMap(e -> e.getInputSlots().stream()) .map(SlotReference.class::cast) .forEach(s -> outputSlotReferenceMap.put(s.getExprId(), s)); 
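// The intermediate-tuple construction below mirrors the hash-join contract documented earlier: materialized slots from both children are copied into intermediateDescriptor (ROWID_COL slots via copySlotDescriptor), and the nullable side of an outer join is marked nullable afterwards.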
List<SlotReference> outputSlotReferences = Stream.concat(leftTuples.stream(), rightTuples.stream()) .map(TupleDescriptor::getSlots) .flatMap(Collection::stream) .map(sd -> context.findExprId(sd.getId())) .map(outputSlotReferenceMap::get) .filter(Objects::nonNull) .collect(Collectors.toList()); for (SlotDescriptor leftSlotDescriptor : leftSlotDescriptors) { if (!leftSlotDescriptor.isMaterialized()) { continue; } SlotReference sf = leftChildOutputMap.get(context.findExprId(leftSlotDescriptor.getId())); SlotDescriptor sd; if (sf == null && leftSlotDescriptor.getColumn().getName().equals(Column.ROWID_COL)) { sd = context.getDescTable().copySlotDescriptor(intermediateDescriptor, leftSlotDescriptor); } else { sd = context.createSlotDesc(intermediateDescriptor, sf, leftSlotDescriptor.getParent().getTable()); } leftIntermediateSlotDescriptor.add(sd); } for (SlotDescriptor rightSlotDescriptor : rightSlotDescriptors) { if (!rightSlotDescriptor.isMaterialized()) { continue; } SlotReference sf = rightChildOutputMap.get(context.findExprId(rightSlotDescriptor.getId())); SlotDescriptor sd; if (sf == null && rightSlotDescriptor.getColumn().getName().equals(Column.ROWID_COL)) { sd = context.getDescTable().copySlotDescriptor(intermediateDescriptor, rightSlotDescriptor); } else { sd = context.createSlotDesc(intermediateDescriptor, sf, rightSlotDescriptor.getParent().getTable()); } rightIntermediateSlotDescriptor.add(sd); } if (nestedLoopJoin.getMarkJoinSlotReference().isPresent()) { outputSlotReferences.add(nestedLoopJoin.getMarkJoinSlotReference().get()); context.createSlotDesc(intermediateDescriptor, nestedLoopJoin.getMarkJoinSlotReference().get()); } if (joinType == JoinType.LEFT_OUTER_JOIN || joinType == JoinType.FULL_OUTER_JOIN) { rightIntermediateSlotDescriptor.forEach(sd -> sd.setIsNullable(true)); } if (joinType == JoinType.RIGHT_OUTER_JOIN || joinType == JoinType.FULL_OUTER_JOIN) { leftIntermediateSlotDescriptor.forEach(sd -> sd.setIsNullable(true)); } nestedLoopJoinNode.setvIntermediateTupleDescList(Lists.newArrayList(intermediateDescriptor)); List<Expr> joinConjuncts = nestedLoopJoin.getOtherJoinConjuncts().stream() .filter(e -> !nestedLoopJoin.isBitmapRuntimeFilterCondition(e)) .map(e -> ExpressionTranslator.translate(e, context)).collect(Collectors.toList()); if (!nestedLoopJoin.isBitMapRuntimeFilterConditionsEmpty() && joinConjuncts.isEmpty()) { joinConjuncts.add(new BoolLiteral(true)); } nestedLoopJoinNode.setJoinConjuncts(joinConjuncts); if (!nestedLoopJoin.getMarkJoinConjuncts().isEmpty()) { List<Expr> markJoinConjuncts = nestedLoopJoin.getMarkJoinConjuncts().stream() .map(e -> ExpressionTranslator.translate(e, context)).collect(Collectors.toList()); nestedLoopJoinNode.setMarkJoinConjuncts(markJoinConjuncts); } nestedLoopJoin.getFilterConjuncts().stream() .filter(e -> !(e.equals(BooleanLiteral.TRUE))) .map(e -> ExpressionTranslator.translate(e, context)) .forEach(nestedLoopJoinNode::addConjunct); if (nestedLoopJoin.isShouldTranslateOutput()) { List<Expr> srcToOutput = outputSlotReferences.stream() .map(e -> ExpressionTranslator.translate(e, context)) .collect(Collectors.toList()); TupleDescriptor outputDescriptor = context.generateTupleDesc(); outputSlotReferences.forEach(s -> context.createSlotDesc(outputDescriptor, s)); nestedLoopJoinNode.setOutputTupleDesc(outputDescriptor); nestedLoopJoinNode.setProjectList(srcToOutput); } if (nestedLoopJoin.getStats() != null) { nestedLoopJoinNode.setCardinality((long) nestedLoopJoin.getStats().getRowCount()); }
updateLegacyPlanIdToPhysicalPlan(leftFragment.getPlanRoot(), nestedLoopJoin); return leftFragment; } else { throw new RuntimeException("Physical nested loop join could not execute with equal join condition."); } } @Override public PlanFragment visitPhysicalLimit(PhysicalLimit<? extends Plan> physicalLimit, PlanTranslatorContext context) { PlanFragment inputFragment = physicalLimit.child(0).accept(this, context); PlanNode child = inputFragment.getPlanRoot(); child.setLimit(MergeLimits.mergeLimit(physicalLimit.getLimit(), physicalLimit.getOffset(), child.getLimit())); updateLegacyPlanIdToPhysicalPlan(child, physicalLimit); return inputFragment; } @Override public PlanFragment visitPhysicalPartitionTopN(PhysicalPartitionTopN<? extends Plan> partitionTopN, PlanTranslatorContext context) { PlanFragment inputFragment = partitionTopN.child(0).accept(this, context); List<List<Expr>> distributeExprLists = getDistributeExprs(partitionTopN.child(0)); PartitionSortNode partitionSortNode = translatePartitionSortNode( partitionTopN, inputFragment.getPlanRoot(), context); partitionSortNode.setChildrenDistributeExprLists(distributeExprLists); addPlanRoot(inputFragment, partitionSortNode, partitionTopN); if (partitionTopN.getPhase() == PartitionTopnPhase.ONE_PHASE_GLOBAL_PTOPN && findOlapScanNodesByPassExchangeAndJoinNode(inputFragment.getPlanRoot())) { inputFragment.setHasColocatePlanNode(true); } return inputFragment; } private List<Expression> getPushDownToProjectionFunctionForRewritten(NamedExpression expression) { List<Expression> targetExprList = expression.collectToList(PushDownToProjectionFunction.class::isInstance); return targetExprList.stream() .filter(PushDownToProjectionFunction::validToPushDown) .collect(Collectors.toList()); } private void registerRewrittenSlot(PhysicalProject<? extends Plan> project, OlapScanNode olapScanNode) { List<Expression> allPushDownProjectionFunctions = project.getProjects().stream() .map(this::getPushDownToProjectionFunctionForRewritten) .flatMap(List::stream) .collect(Collectors.toList()); for (Expression expr : allPushDownProjectionFunctions) { PushDownToProjectionFunction function = (PushDownToProjectionFunction) expr; if (context != null && context.getConnectContext() != null && context.getConnectContext().getStatementContext() != null) { Slot argumentSlot = function.getInputSlots().stream().findFirst().get(); Expression rewrittenSlot = PushDownToProjectionFunction.rewriteToSlot( function, (SlotReference) argumentSlot); TupleDescriptor tupleDescriptor = context.getTupleDesc(olapScanNode.getTupleId()); context.createSlotDesc(tupleDescriptor, (SlotReference) rewrittenSlot); } } } /** * Returns a new fragment with a UnionNode as its root. The data partition of the * returned fragment and how the data of the child fragments is consumed depend on the * data partitions of the child fragments: * - All child fragments are unpartitioned or partitioned: The returned fragment has an * UNPARTITIONED or RANDOM data partition, respectively. The UnionNode absorbs the * plan trees of all child fragments. * - Mixed partitioned/unpartitioned child fragments: The returned fragment is * RANDOM partitioned. The plan trees of all partitioned child fragments are absorbed * into the UnionNode. All unpartitioned child fragments are connected to the * UnionNode via a RANDOM exchange, and remain unchanged otherwise.
*/ @Override public PlanFragment visitPhysicalSetOperation( PhysicalSetOperation setOperation, PlanTranslatorContext context) { List<PlanFragment> childrenFragments = new ArrayList<>(); for (Plan plan : setOperation.children()) { childrenFragments.add(plan.accept(this, context)); } TupleDescriptor setTuple = generateTupleDesc(setOperation.getOutput(), null, context); List<SlotDescriptor> outputSlotDescs = new ArrayList<>(setTuple.getSlots()); SetOperationNode setOperationNode; if (setOperation instanceof PhysicalUnion) { setOperationNode = new UnionNode(context.nextPlanNodeId(), setTuple.getId()); } else if (setOperation instanceof PhysicalExcept) { setOperationNode = new ExceptNode(context.nextPlanNodeId(), setTuple.getId()); } else if (setOperation instanceof PhysicalIntersect) { setOperationNode = new IntersectNode(context.nextPlanNodeId(), setTuple.getId()); } else { throw new RuntimeException("unsupported set operation type " + setOperation); } setOperationNode.setNereidsId(setOperation.getId()); setOperation.getRegularChildrenOutputs().stream() .map(o -> o.stream() .map(e -> ExpressionTranslator.translate(e, context)) .collect(ImmutableList.toImmutableList())) .forEach(setOperationNode::addResultExprLists); if (setOperation instanceof PhysicalUnion) { ((PhysicalUnion) setOperation).getConstantExprsList().stream() .map(l -> l.stream() .map(e -> ExpressionTranslator.translate(e, context)) .collect(ImmutableList.toImmutableList())) .forEach(setOperationNode::addConstExprList); } for (PlanFragment childFragment : childrenFragments) { setOperationNode.addChild(childFragment.getPlanRoot()); } setOperationNode.finalizeForNereids(outputSlotDescs, outputSlotDescs); PlanFragment setOperationFragment; if (childrenFragments.isEmpty()) { setOperationFragment = createPlanFragment(setOperationNode, DataPartition.UNPARTITIONED, setOperation); context.addPlanFragment(setOperationFragment); } else { int childrenSize = childrenFragments.size(); setOperationFragment = childrenFragments.get(childrenSize - 1); for (int i = childrenSize - 2; i >= 0; i--) { context.mergePlanFragment(childrenFragments.get(i), setOperationFragment); for (PlanFragment child : childrenFragments.get(i).getChildren()) { setOperationFragment.addChild(child); } } setPlanRoot(setOperationFragment, setOperationNode, setOperation); } if (!setOperation.getPhysicalProperties().equals(PhysicalProperties.ANY) && findOlapScanNodesByPassExchangeAndJoinNode(setOperationFragment.getPlanRoot())) { setOperationFragment.setHasColocatePlanNode(true); setOperationNode.setColocate(true); } return setOperationFragment; } /*- * Physical sort: * 1. Build sortInfo * There are two types of slotRef: * One is generated by the previous node, collectively called old; * the other is newly generated by the sort node, collectively called new. * Filling of sortInfo-related data structures: * a. ordering uses newSlotRef. * b. sortTupleSlotExprs uses oldSlotRef. * 2. Create sortNode * 3. Create mergeFragment * TODO: When the slotRef of sort is generated, * it is based on the expressions in the select list and the orderBy expressions, to ensure the uniqueness of slotRef. * But eg: * select a+1 from table order by a+1; * the expressions of the two are inconsistent. * The former will perform an additional Alias. * Currently we cannot test whether this will have any effect. * After a+1 can be parsed, reprocess. */ @Override public PlanFragment visitPhysicalQuickSort(PhysicalQuickSort<?
extends Plan> sort, PlanTranslatorContext context) { PlanFragment inputFragment = sort.child(0).accept(this, context); List<List<Expr>> distributeExprLists = getDistributeExprs(sort.child(0)); if (!sort.getSortPhase().isMerge()) { SortNode sortNode = translateSortNode(sort, inputFragment.getPlanRoot(), context); sortNode.setChildrenDistributeExprLists(distributeExprLists); addPlanRoot(inputFragment, sortNode, sort); } else { if (!(inputFragment.getPlanRoot() instanceof ExchangeNode)) { return inputFragment; } SortNode sortNode = (SortNode) inputFragment.getPlanRoot().getChild(0); ((ExchangeNode) inputFragment.getPlanRoot()).setMergeInfo(sortNode.getSortInfo()); sortNode.setMergeByExchange(); sortNode.setChildrenDistributeExprLists(distributeExprLists); } return inputFragment; } @Override public PlanFragment visitPhysicalTopN(PhysicalTopN<? extends Plan> topN, PlanTranslatorContext context) { PlanFragment inputFragment = topN.child(0).accept(this, context); List<List<Expr>> distributeExprLists = getDistributeExprs(topN.child(0)); if (!topN.getSortPhase().isMerge()) { SortNode sortNode = translateSortNode(topN, inputFragment.getPlanRoot(), context); sortNode.setOffset(topN.getOffset()); sortNode.setLimit(topN.getLimit()); if (context.getTopnFilterContext().isTopnFilterSource(topN)) { sortNode.setUseTopnOpt(true); context.getTopnFilterContext().getTargets(topN).forEach( olapScan -> { Optional<OlapScanNode> legacyScan = context.getTopnFilterContext().getLegacyScanNode(olapScan); Preconditions.checkState(legacyScan.isPresent(), "cannot find OlapScanNode for topn filter"); legacyScan.get().addTopnFilterSortNode(sortNode); } ); } if (sortNode.getChild(0) instanceof OlapScanNode) { OlapScanNode scanNode = ((OlapScanNode) sortNode.getChild(0)); if (checkPushSort(sortNode, scanNode.getOlapTable())) { SortInfo sortInfo = sortNode.getSortInfo(); scanNode.setSortInfo(sortInfo); scanNode.getSortInfo().setSortTupleSlotExprs(sortNode.getResolvedTupleExprs()); for (Expr expr : sortInfo.getOrderingExprs()) { scanNode.getSortInfo().addMaterializedOrderingExpr(expr); } if (sortNode.getOffset() > 0) { scanNode.setSortLimit(sortNode.getLimit() + sortNode.getOffset()); } else { scanNode.setSortLimit(sortNode.getLimit()); } } } sortNode.setChildrenDistributeExprLists(distributeExprLists); addPlanRoot(inputFragment, sortNode, topN); } else { if (!(inputFragment.getPlanRoot() instanceof ExchangeNode)) { inputFragment.getPlanRoot().setOffset(topN.getOffset()); inputFragment.getPlanRoot().setLimit(topN.getLimit()); return inputFragment; } ExchangeNode exchangeNode = (ExchangeNode) inputFragment.getPlanRoot(); exchangeNode.setChildrenDistributeExprLists(distributeExprLists); exchangeNode.setMergeInfo(((SortNode) exchangeNode.getChild(0)).getSortInfo()); exchangeNode.setLimit(topN.getLimit()); exchangeNode.setOffset(topN.getOffset()); ((SortNode) exchangeNode.getChild(0)).setMergeByExchange(); } updateLegacyPlanIdToPhysicalPlan(inputFragment.getPlanRoot(), topN); return inputFragment; } @Override public PlanFragment visitPhysicalDeferMaterializeTopN(PhysicalDeferMaterializeTopN<? 
extends Plan> topN, PlanTranslatorContext context) { PlanFragment planFragment = visitPhysicalTopN(topN.getPhysicalTopN(), context); if (planFragment.getPlanRoot() instanceof SortNode) { SortNode sortNode = (SortNode) planFragment.getPlanRoot(); sortNode.setUseTwoPhaseReadOpt(true); sortNode.getSortInfo().setUseTwoPhaseRead(); if (context.getTopnFilterContext().isTopnFilterSource(topN)) { sortNode.setUseTopnOpt(true); context.getTopnFilterContext().getTargets(topN).forEach( olapScan -> { Optional<OlapScanNode> legacyScan = context.getTopnFilterContext().getLegacyScanNode(olapScan); Preconditions.checkState(legacyScan.isPresent(), "cannot find OlapScanNode for topn filter"); legacyScan.get().addTopnFilterSortNode(sortNode); } ); } TupleDescriptor tupleDescriptor = sortNode.getSortInfo().getSortTupleDescriptor(); for (SlotDescriptor slotDescriptor : tupleDescriptor.getSlots()) { if (topN.getDeferMaterializeSlotIds() .contains(context.findExprId(slotDescriptor.getId()))) { slotDescriptor.setNeedMaterialize(false); } } } return planFragment; } @Override public PlanFragment visitPhysicalRepeat(PhysicalRepeat<? extends Plan> repeat, PlanTranslatorContext context) { PlanFragment inputPlanFragment = repeat.child(0).accept(this, context); List<List<Expr>> distributeExprLists = getDistributeExprs(repeat.child(0)); Set<VirtualSlotReference> sortedVirtualSlots = repeat.getSortedVirtualSlots(); TupleDescriptor virtualSlotsTuple = generateTupleDesc(ImmutableList.copyOf(sortedVirtualSlots), null, context); ImmutableSet<Expression> flattenGroupingSetExprs = ImmutableSet.copyOf( ExpressionUtils.flatExpressions(repeat.getGroupingSets())); List<Slot> aggregateFunctionUsedSlots = repeat.getOutputExpressions() .stream() .filter(output -> !(output instanceof VirtualSlotReference)) .filter(output -> !flattenGroupingSetExprs.contains(output)) .distinct() .map(NamedExpression::toSlot) .collect(ImmutableList.toImmutableList()); List<Expr> preRepeatExprs = Stream.concat(flattenGroupingSetExprs.stream(), aggregateFunctionUsedSlots.stream()) .map(expr -> ExpressionTranslator.translate(expr, context)).collect(ImmutableList.toImmutableList()); List<Slot> outputSlots = Stream.concat( repeat.getOutputExpressions().stream().filter(output -> flattenGroupingSetExprs.contains(output)), repeat.getOutputExpressions().stream().filter(output -> !flattenGroupingSetExprs.contains(output))) .map(NamedExpression::toSlot).collect(ImmutableList.toImmutableList()); TupleDescriptor outputTuple = generateTupleDesc(outputSlots, null, context); GroupingInfo groupingInfo = new GroupingInfo( GroupingType.GROUPING_SETS, virtualSlotsTuple, outputTuple, preRepeatExprs); List<Set<Integer>> repeatSlotIdList = repeat.computeRepeatSlotIdList(getSlotIds(outputTuple)); Set<Integer> allSlotId = repeatSlotIdList.stream() .flatMap(Set::stream) .collect(ImmutableSet.toImmutableSet()); RepeatNode repeatNode = new RepeatNode(context.nextPlanNodeId(), inputPlanFragment.getPlanRoot(), groupingInfo, repeatSlotIdList, allSlotId, repeat.computeVirtualSlotValues(sortedVirtualSlots)); repeatNode.setNereidsId(repeat.getId()); repeatNode.setChildrenDistributeExprLists(distributeExprLists); addPlanRoot(inputPlanFragment, repeatNode, repeat); updateLegacyPlanIdToPhysicalPlan(inputPlanFragment.getPlanRoot(), repeat); return inputPlanFragment; } @Override public PlanFragment visitPhysicalWindow(PhysicalWindow<? 
extends Plan> physicalWindow, PlanTranslatorContext context) { PlanFragment inputPlanFragment = physicalWindow.child(0).accept(this, context); List<List<Expr>> distributeExprLists = getDistributeExprs(physicalWindow.child(0)); WindowFrameGroup windowFrameGroup = physicalWindow.getWindowFrameGroup(); List<Expression> partitionKeyList = Lists.newArrayList(windowFrameGroup.getPartitionKeys()); List<OrderExpression> orderKeyList = windowFrameGroup.getOrderKeys(); List<NamedExpression> windowFunctionList = windowFrameGroup.getGroups(); WindowFrame windowFrame = windowFrameGroup.getWindowFrame(); List<Expr> partitionExprs = partitionKeyList.stream() .map(e -> ExpressionTranslator.translate(e, context)) .collect(Collectors.toList()); List<OrderByElement> orderByElements = orderKeyList.stream() .map(orderKey -> new OrderByElement( ExpressionTranslator.translate(orderKey.child(), context), orderKey.isAsc(), orderKey.isNullFirst())) .collect(Collectors.toList()); List<Expr> analyticFnCalls = windowFunctionList.stream() .map(e -> { Expression function = e.child(0).child(0); if (function instanceof AggregateFunction) { AggregateParam param = AggregateParam.LOCAL_RESULT; function = new AggregateExpression((AggregateFunction) function, param); } return ExpressionTranslator.translate(function, context); }) .map(FunctionCallExpr.class::cast) .peek(fnCall -> { fnCall.setIsAnalyticFnCall(true); ((org.apache.doris.catalog.AggregateFunction) fnCall.getFn()).setIsAnalyticFn(true); }) .collect(Collectors.toList()); AnalyticWindow analyticWindow = physicalWindow.translateWindowFrame(windowFrame, context); Map<ExprId, SlotRef> bufferedSlotRefForWindow = getBufferedSlotRefForWindow(windowFrameGroup, context); TupleDescriptor bufferedTupleDesc = context.getBufferedTupleForWindow(); Expr partitionExprsIsNullableMatched = partitionExprs.isEmpty() ? null : windowExprsHaveMatchedNullable( partitionKeyList, partitionExprs, bufferedSlotRefForWindow); Expr orderElementsIsNullableMatched = orderByElements.isEmpty() ? 
null : windowExprsHaveMatchedNullable( orderKeyList.stream().map(UnaryNode::child).collect(Collectors.toList()), orderByElements.stream().map(OrderByElement::getExpr).collect(Collectors.toList()), bufferedSlotRefForWindow); List<Slot> windowSlotList = windowFunctionList.stream() .map(NamedExpression::toSlot) .collect(Collectors.toList()); TupleDescriptor outputTupleDesc = generateTupleDesc(windowSlotList, null, context); AnalyticEvalNode analyticEvalNode = new AnalyticEvalNode( context.nextPlanNodeId(), inputPlanFragment.getPlanRoot(), analyticFnCalls, partitionExprs, orderByElements, analyticWindow, outputTupleDesc, outputTupleDesc, partitionExprsIsNullableMatched, orderElementsIsNullableMatched, bufferedTupleDesc ); analyticEvalNode.setNereidsId(physicalWindow.getId()); analyticEvalNode.setChildrenDistributeExprLists(distributeExprLists); PlanNode root = inputPlanFragment.getPlanRoot(); if (root instanceof SortNode) { ((SortNode) root).setIsAnalyticSort(true); } inputPlanFragment.addPlanRoot(analyticEvalNode); if (findOlapScanNodesByPassExchangeAndJoinNode(inputPlanFragment.getPlanRoot())) { inputPlanFragment.setHasColocatePlanNode(true); analyticEvalNode.setColocate(true); if (root instanceof SortNode) { ((SortNode) root).setColocate(true); } } return inputPlanFragment; } /* ******************************************************************************************** * private functions * ******************************************************************************************** */ private PartitionSortNode translatePartitionSortNode(PhysicalPartitionTopN<? extends Plan> partitionTopN, PlanNode childNode, PlanTranslatorContext context) { List<Expr> partitionExprs = partitionTopN.getPartitionKeys().stream() .map(e -> ExpressionTranslator.translate(e, context)) .collect(Collectors.toList()); TupleDescriptor sortTuple = generateTupleDesc(partitionTopN.child().getOutput(), null, context); List<Expr> orderingExprs = Lists.newArrayList(); List<Boolean> ascOrders = Lists.newArrayList(); List<Boolean> nullsFirstParams = Lists.newArrayList(); List<OrderKey> orderKeys = partitionTopN.getOrderKeys(); orderKeys.forEach(k -> { orderingExprs.add(ExpressionTranslator.translate(k.getExpr(), context)); ascOrders.add(k.isAsc()); nullsFirstParams.add(k.isNullFirst()); }); SortInfo sortInfo = new SortInfo(orderingExprs, ascOrders, nullsFirstParams, sortTuple); PartitionSortNode partitionSortNode = new PartitionSortNode(context.nextPlanNodeId(), childNode, partitionTopN.getFunction(), partitionExprs, sortInfo, partitionTopN.hasGlobalLimit(), partitionTopN.getPartitionLimit(), partitionTopN.getPhase()); partitionSortNode.setNereidsId(partitionTopN.getId()); if (partitionTopN.getStats() != null) { partitionSortNode.setCardinality((long) partitionTopN.getStats().getRowCount()); } updateLegacyPlanIdToPhysicalPlan(partitionSortNode, partitionTopN); return partitionSortNode; } private SortNode translateSortNode(AbstractPhysicalSort<? 
extends Plan> sort, PlanNode childNode, PlanTranslatorContext context) { TupleDescriptor sortTuple = generateTupleDesc(sort.child().getOutput(), null, context); List<Expr> orderingExprs = Lists.newArrayList(); List<Boolean> ascOrders = Lists.newArrayList(); List<Boolean> nullsFirstParams = Lists.newArrayList(); List<OrderKey> orderKeys = sort.getOrderKeys(); orderKeys.forEach(k -> { orderingExprs.add(ExpressionTranslator.translate(k.getExpr(), context)); ascOrders.add(k.isAsc()); nullsFirstParams.add(k.isNullFirst()); }); SortInfo sortInfo = new SortInfo(orderingExprs, ascOrders, nullsFirstParams, sortTuple); SortNode sortNode = new SortNode(context.nextPlanNodeId(), childNode, sortInfo, sort instanceof PhysicalTopN); sortNode.setNereidsId(sort.getId()); if (sort.getStats() != null) { sortNode.setCardinality((long) sort.getStats().getRowCount()); } updateLegacyPlanIdToPhysicalPlan(sortNode, sort); return sortNode; } private void updateScanSlotsMaterialization(ScanNode scanNode, Set<SlotId> requiredSlotIdSet, Set<SlotId> requiredByProjectSlotIdSet, List<SlotId> slotIdsByOrder, PlanTranslatorContext context) { SlotDescriptor smallest = scanNode.getTupleDesc().getSlots().get(0); if (CollectionUtils.isNotEmpty(slotIdsByOrder)) { Map<SlotId, SlotDescriptor> idToSlotDescMap = scanNode.getTupleDesc().getSlots().stream() .filter(s -> requiredSlotIdSet.contains(s.getId())) .collect(Collectors.toMap(SlotDescriptor::getId, s -> s)); scanNode.getTupleDesc().getSlots().clear(); for (SlotId slotId : slotIdsByOrder) { scanNode.getTupleDesc().getSlots().add(idToSlotDescMap.get(slotId)); } } else { scanNode.getTupleDesc().getSlots().removeIf(s -> !requiredSlotIdSet.contains(s.getId())); } if (scanNode.getTupleDesc().getSlots().isEmpty()) { scanNode.getTupleDesc().getSlots().add(smallest); } try { if (context.getSessionVariable() != null && context.getSessionVariable().forbidUnknownColStats && !StatisticConstants.isSystemTable(scanNode.getTupleDesc().getTable())) { for (SlotId slotId : requiredByProjectSlotIdSet) { if (context.isColumnStatsUnknown(scanNode, slotId)) { String colName = scanNode.getTupleDesc().getSlot(slotId.asInt()).getColumn().getName(); throw new AnalysisException("meet unknown column stats: " + colName); } } context.removeScanFromStatsUnknownColumnsMap(scanNode); } scanNode.updateRequiredSlots(context, requiredByProjectSlotIdSet); } catch (UserException e) { Util.logAndThrowRuntimeException(LOG, "User Exception while reset external file scan node contexts.", e); } } private void addConjunctsToPlanNode(PhysicalFilter<? 
extends Plan> filter, PlanNode planNode, PlanTranslatorContext context) { filter.getConjuncts().stream() .map(e -> ExpressionTranslator.translate(e, context)) .forEach(planNode::addConjunct); updateLegacyPlanIdToPhysicalPlan(planNode, filter); } private TupleDescriptor generateTupleDesc(List<Slot> slotList, TableIf table, PlanTranslatorContext context) { TupleDescriptor tupleDescriptor = context.generateTupleDesc(); tupleDescriptor.setTable(table); for (Slot slot : slotList) { context.createSlotDesc(tupleDescriptor, (SlotReference) slot, table); } return tupleDescriptor; } private PlanFragment connectJoinNode(HashJoinNode hashJoinNode, PlanFragment leftFragment, PlanFragment rightFragment, PlanTranslatorContext context, AbstractPlan join) { hashJoinNode.setChild(0, leftFragment.getPlanRoot()); hashJoinNode.setChild(1, rightFragment.getPlanRoot()); setPlanRoot(leftFragment, hashJoinNode, join); context.mergePlanFragment(rightFragment, leftFragment); for (PlanFragment rightChild : rightFragment.getChildren()) { leftFragment.addChild(rightChild); } return leftFragment; } private List<SlotReference> collectGroupBySlots(List<Expression> groupByExpressions, List<NamedExpression> outputExpressions) { List<SlotReference> groupSlots = Lists.newArrayList(); Set<VirtualSlotReference> virtualSlotReferences = groupByExpressions.stream() .filter(VirtualSlotReference.class::isInstance) .map(VirtualSlotReference.class::cast) .collect(Collectors.toSet()); for (Expression e : groupByExpressions) { if (e instanceof SlotReference && outputExpressions.stream().anyMatch(o -> o.anyMatch(e::equals))) { groupSlots.add((SlotReference) e); } else if (e instanceof SlotReference && !virtualSlotReferences.isEmpty()) { groupSlots.add((SlotReference) e); } else { groupSlots.add(new SlotReference(e.toSql(), e.getDataType(), e.nullable(), ImmutableList.of())); } } return groupSlots; } private List<Integer> getSlotIds(TupleDescriptor tupleDescriptor) { return tupleDescriptor.getSlots() .stream() .map(slot -> slot.getId().asInt()) .collect(ImmutableList.toImmutableList()); } private Map<ExprId, SlotRef> getBufferedSlotRefForWindow(WindowFrameGroup windowFrameGroup, PlanTranslatorContext context) { Map<ExprId, SlotRef> bufferedSlotRefForWindow = context.getBufferedSlotRefForWindow(); windowFrameGroup.getPartitionKeys().stream() .map(NamedExpression.class::cast) .forEach(expression -> { ExprId exprId = expression.getExprId(); bufferedSlotRefForWindow.putIfAbsent(exprId, context.findSlotRef(exprId)); }); windowFrameGroup.getOrderKeys().stream() .map(UnaryNode::child) .map(NamedExpression.class::cast) .forEach(expression -> { ExprId exprId = expression.getExprId(); bufferedSlotRefForWindow.putIfAbsent(exprId, context.findSlotRef(exprId)); }); return bufferedSlotRefForWindow; } private Expr windowExprsHaveMatchedNullable(List<Expression> expressions, List<Expr> exprs, Map<ExprId, SlotRef> bufferedSlotRef) { Map<ExprId, Expr> exprIdToExpr = Maps.newHashMap(); for (int i = 0; i < expressions.size(); i++) { NamedExpression expression = (NamedExpression) expressions.get(i); exprIdToExpr.put(expression.getExprId(), exprs.get(i)); } return windowExprsHaveMatchedNullable(exprIdToExpr, bufferedSlotRef, expressions, 0, expressions.size()); } private Expr windowExprsHaveMatchedNullable(Map<ExprId, Expr> exprIdToExpr, Map<ExprId, SlotRef> exprIdToSlotRef, List<Expression> expressions, int i, int size) { if (i > size - 1) { return new BoolLiteral(true); } ExprId exprId = ((NamedExpression) expressions.get(i)).getExprId(); Expr lhs = 
exprIdToExpr.get(exprId); Expr rhs = exprIdToSlotRef.get(exprId); Expr bothNull = new CompoundPredicate(CompoundPredicate.Operator.AND, new IsNullPredicate(lhs, false, true), new IsNullPredicate(rhs, false, true)); Expr lhsEqRhsNotNull = new CompoundPredicate(CompoundPredicate.Operator.AND, new CompoundPredicate(CompoundPredicate.Operator.AND, new IsNullPredicate(lhs, true, true), new IsNullPredicate(rhs, true, true)), new BinaryPredicate(BinaryPredicate.Operator.EQ, lhs, rhs, Type.BOOLEAN, NullableMode.DEPEND_ON_ARGUMENT)); Expr remainder = windowExprsHaveMatchedNullable(exprIdToExpr, exprIdToSlotRef, expressions, i + 1, size); return new CompoundPredicate(CompoundPredicate.Operator.AND, new CompoundPredicate(CompoundPredicate.Operator.OR, bothNull, lhsEqRhsNotNull), remainder); } private PlanFragment createPlanFragment(PlanNode planNode, DataPartition dataPartition, AbstractPlan physicalPlan) { PlanFragment planFragment = new PlanFragment(context.nextFragmentId(), planNode, dataPartition); updateLegacyPlanIdToPhysicalPlan(planNode, physicalPlan); return planFragment; } private void setPlanRoot(PlanFragment fragment, PlanNode planNode, AbstractPlan physicalPlan) { fragment.setPlanRoot(planNode); updateLegacyPlanIdToPhysicalPlan(planNode, physicalPlan); } private void addPlanRoot(PlanFragment fragment, PlanNode planNode, AbstractPlan physicalPlan) { fragment.addPlanRoot(planNode); updateLegacyPlanIdToPhysicalPlan(planNode, physicalPlan); } private DataPartition toDataPartition(DistributionSpec distributionSpec, List<ExprId> childOutputIds, PlanTranslatorContext context) { if (distributionSpec instanceof DistributionSpecAny || distributionSpec instanceof DistributionSpecStorageAny || distributionSpec instanceof DistributionSpecExecutionAny) { return DataPartition.RANDOM; } else if (distributionSpec instanceof DistributionSpecGather || distributionSpec instanceof DistributionSpecStorageGather || distributionSpec instanceof DistributionSpecReplicated) { return DataPartition.UNPARTITIONED; } else if (distributionSpec instanceof DistributionSpecHash) { DistributionSpecHash distributionSpecHash = (DistributionSpecHash) distributionSpec; List<Expr> partitionExprs = Lists.newArrayList(); for (int i = 0; i < distributionSpecHash.getEquivalenceExprIds().size(); i++) { Set<ExprId> equivalenceExprId = distributionSpecHash.getEquivalenceExprIds().get(i); for (ExprId exprId : equivalenceExprId) { if (childOutputIds.contains(exprId)) { partitionExprs.add(context.findSlotRef(exprId)); break; } } if (partitionExprs.size() != i + 1) { throw new RuntimeException("Cannot translate DistributionSpec to DataPartition," + " DistributionSpec: " + distributionSpec + ", child output: " + childOutputIds); } } TPartitionType partitionType; switch (distributionSpecHash.getShuffleType()) { case STORAGE_BUCKETED: partitionType = TPartitionType.BUCKET_SHFFULE_HASH_PARTITIONED; break; case EXECUTION_BUCKETED: partitionType = TPartitionType.HASH_PARTITIONED; break; case NATURAL: default: throw new RuntimeException("Do not support shuffle type: " + distributionSpecHash.getShuffleType()); } return new DataPartition(partitionType, partitionExprs); } else if (distributionSpec instanceof DistributionSpecTabletIdShuffle) { return DataPartition.TABLET_ID; } else if (distributionSpec instanceof DistributionSpecTableSinkHashPartitioned) { DistributionSpecTableSinkHashPartitioned partitionSpecHash = (DistributionSpecTableSinkHashPartitioned) distributionSpec; List<Expr> partitionExprs = Lists.newArrayList(); List<ExprId> 
partitionExprIds = partitionSpecHash.getOutputColExprIds(); for (ExprId partitionExprId : partitionExprIds) { if (childOutputIds.contains(partitionExprId)) { partitionExprs.add(context.findSlotRef(partitionExprId)); } } return new DataPartition(TPartitionType.TABLE_SINK_HASH_PARTITIONED, partitionExprs); } else if (distributionSpec instanceof DistributionSpecTableSinkRandomPartitioned) { return new DataPartition(TPartitionType.TABLE_SINK_RANDOM_PARTITIONED); } else { throw new RuntimeException("Unknown DistributionSpec: " + distributionSpec); } } private void updateLegacyPlanIdToPhysicalPlan(PlanNode planNode, AbstractPlan physicalPlan) { if (statsErrorEstimator != null) { statsErrorEstimator.updateLegacyPlanIdToPhysicalPlan(planNode, physicalPlan); } } private void injectRowIdColumnSlot(TupleDescriptor tupleDesc) { SlotDescriptor slotDesc = context.addSlotDesc(tupleDesc); if (LOG.isDebugEnabled()) { LOG.debug("inject slot {}", slotDesc); } String name = Column.ROWID_COL; Column col = new Column(name, Type.STRING, false, null, false, "", "rowid column"); slotDesc.setType(Type.STRING); slotDesc.setColumn(col); slotDesc.setIsNullable(false); slotDesc.setIsMaterialized(true); } /** * topN opt: using storage data ordering to accelerate topn operation. * refer pr: optimize topn query if order by columns is prefix of sort keys of table ( */ private boolean checkPushSort(SortNode sortNode, OlapTable olapTable) { if (sortNode.getLimit() <= 0 || sortNode.getLimit() > context.getSessionVariable().topnOptLimitThreshold) { return false; } if (sortNode.getSortInfo().getIsAscOrder().stream().distinct().count() != 1 || olapTable.isZOrderSort()) { return false; } List<Expr> sortExprs = sortNode.getSortInfo().getOrderingExprs(); List<Boolean> nullsFirsts = sortNode.getSortInfo().getNullsFirst(); List<Boolean> isAscOrders = sortNode.getSortInfo().getIsAscOrder(); if (sortExprs.size() > olapTable.getDataSortInfo().getColNum()) { return false; } List<Column> sortKeyColumns = olapTable.getFullSchema(); if (olapTable.getEnableUniqueKeyMergeOnWrite()) { Map<Integer, Column> clusterKeyMap = new TreeMap<>(); for (Column column : olapTable.getFullSchema()) { if (column.getClusterKeyId() != -1) { clusterKeyMap.put(column.getClusterKeyId(), column); } } if (!clusterKeyMap.isEmpty()) { sortKeyColumns.clear(); sortKeyColumns.addAll(clusterKeyMap.values()); } } for (int i = 0; i < sortExprs.size(); i++) { Column sortColumn = sortKeyColumns.get(i); Expr sortExpr = sortExprs.get(i); if (sortExpr instanceof SlotRef) { SlotRef slotRef = (SlotRef) sortExpr; if (sortColumn.equals(slotRef.getColumn())) { if (sortColumn.isAllowNull() && nullsFirsts.get(i) && !isAscOrders.get(i)) { return false; } } else { return false; } } else { return false; } } return true; } private List<Expr> translateToLegacyConjuncts(Set<Expression> conjuncts) { List<Expr> outputExprs = Lists.newArrayList(); if (conjuncts != null) { conjuncts.stream() .map(e -> ExpressionTranslator.translate(e, context)) .forEach(outputExprs::add); } return outputExprs; } private boolean isComplexDataType(DataType dataType) { return dataType instanceof ArrayType || dataType instanceof MapType || dataType instanceof JsonType || dataType instanceof StructType; } private PhysicalCTEConsumer getCTEConsumerChild(PhysicalPlan root) { if (root == null) { return null; } else if (root instanceof PhysicalCTEConsumer) { return (PhysicalCTEConsumer) root; } else if (root.children().size() != 1) { return null; } else { return getCTEConsumerChild((PhysicalPlan) root.child(0)); } } 
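// Returns true iff an OlapScanNode is reachable from root without crossing a join or an
// exchange node; visitPhysicalSetOperation and visitPhysicalWindow use this to decide
// whether the new node can be marked colocate with the scan inside the same fragment.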
private boolean findOlapScanNodesByPassExchangeAndJoinNode(PlanNode root) { if (root instanceof OlapScanNode) { return true; } else if (!(root instanceof JoinNodeBase || root instanceof ExchangeNode)) { return root.getChildren().stream().anyMatch(child -> findOlapScanNodesByPassExchangeAndJoinNode(child)); } return false; } private List<List<Expr>> getDistributeExprs(Plan ... children) { List<List<Expr>> distributeExprLists = Lists.newArrayList(); for (Plan child : children) { DistributionSpec spec = ((PhysicalPlan) child).getPhysicalProperties().getDistributionSpec(); distributeExprLists.add(getDistributeExpr(child.getOutputExprIds(), spec)); } return distributeExprLists; } private List<Expr> getDistributeExpr(List<ExprId> childOutputIds, DistributionSpec spec) { if (spec instanceof DistributionSpecHash) { DistributionSpecHash distributionSpecHash = (DistributionSpecHash) spec; List<Expr> partitionExprs = Lists.newArrayList(); for (int i = 0; i < distributionSpecHash.getEquivalenceExprIds().size(); i++) { Set<ExprId> equivalenceExprId = distributionSpecHash.getEquivalenceExprIds().get(i); for (ExprId exprId : equivalenceExprId) { if (childOutputIds.contains(exprId)) { partitionExprs.add(context.findSlotRef(exprId)); break; } } } return partitionExprs; } return Lists.newArrayList(); } }
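/**
 * Visitor that translates a Nereids physical plan tree into the legacy planner's
 * PlanFragment tree: each visit method maps one physical operator to its legacy
 * PlanNode/DataSink counterpart and wires it into a fragment (see translatePlan below).
 */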
class PhysicalPlanTranslator extends DefaultPlanVisitor<PlanFragment, PlanTranslatorContext> { private static final Logger LOG = LogManager.getLogger(PhysicalPlanTranslator.class); private final StatsErrorEstimator statsErrorEstimator; private final PlanTranslatorContext context; public PhysicalPlanTranslator() { this(null, null); } public PhysicalPlanTranslator(PlanTranslatorContext context) { this(context, null); } public PhysicalPlanTranslator(PlanTranslatorContext context, StatsErrorEstimator statsErrorEstimator) { this.context = context; this.statsErrorEstimator = statsErrorEstimator; } /** * Translate Nereids Physical Plan tree to Stale Planner PlanFragment tree. * * @param physicalPlan Nereids Physical Plan tree * @return Stale Planner PlanFragment tree */ public PlanFragment translatePlan(PhysicalPlan physicalPlan) { PlanFragment rootFragment = physicalPlan.accept(this, context); if (CollectionUtils.isEmpty(rootFragment.getOutputExprs())) { List<Expr> outputExprs = Lists.newArrayList(); physicalPlan.getOutput().stream().map(Slot::getExprId) .forEach(exprId -> outputExprs.add(context.findSlotRef(exprId))); rootFragment.setOutputExprs(outputExprs); } Collections.reverse(context.getPlanFragments()); context.getDescTable().computeMemLayout(); if (context.getSessionVariable() != null && context.getSessionVariable().forbidUnknownColStats) { Set<ScanNode> scans = context.getScanNodeWithUnknownColumnStats(); if (!scans.isEmpty()) { StringBuilder builder = new StringBuilder(); scans.forEach(builder::append); throw new AnalysisException("tables with unknown column stats: " + builder); } } return rootFragment; } /* ******************************************************************************************** * distribute node * ******************************************************************************************** */ @Override public PlanFragment visitPhysicalDistribute(PhysicalDistribute<? 
extends Plan> distribute, PlanTranslatorContext context) { Plan child = distribute.child(); PlanFragment inputFragment = child.accept(this, context); List<List<Expr>> distributeExprLists = getDistributeExprs(child); if (inputFragment.getPlanRoot() instanceof AggregationNode && child instanceof PhysicalHashAggregate && context.getFirstAggregateInFragment(inputFragment) == child) { PhysicalHashAggregate<?> hashAggregate = (PhysicalHashAggregate<?>) child; if (hashAggregate.getAggPhase() == AggPhase.LOCAL && hashAggregate.getAggMode() == AggMode.INPUT_TO_BUFFER) { AggregationNode aggregationNode = (AggregationNode) inputFragment.getPlanRoot(); aggregationNode.setUseStreamingPreagg(hashAggregate.isMaybeUsingStream()); } } ExchangeNode exchangeNode = new ExchangeNode(context.nextPlanNodeId(), inputFragment.getPlanRoot()); updateLegacyPlanIdToPhysicalPlan(exchangeNode, distribute); List<ExprId> validOutputIds = distribute.getOutputExprIds(); if (child instanceof PhysicalHashAggregate) { List<ExprId> keys = ((PhysicalHashAggregate<?>) child).getGroupByExpressions().stream() .filter(SlotReference.class::isInstance) .map(SlotReference.class::cast) .map(SlotReference::getExprId) .collect(Collectors.toList()); keys.addAll(validOutputIds); validOutputIds = keys; } else if (child instanceof PhysicalLimit && ((PhysicalLimit<?>) child).getPhase().isGlobal()) { exchangeNode.setOffset(((PhysicalLimit<?>) child).getOffset()); } if (inputFragment instanceof MultiCastPlanFragment) { MultiCastDataSink multiCastDataSink = (MultiCastDataSink) inputFragment.getSink(); DataStreamSink dataStreamSink = multiCastDataSink.getDataStreamSinks().get( multiCastDataSink.getDataStreamSinks().size() - 1); if (!(child instanceof PhysicalProject)) { List<Expr> projectionExprs = new ArrayList<>(); PhysicalCTEConsumer consumer = getCTEConsumerChild(distribute); Preconditions.checkState(consumer != null, "consumer not found"); for (Slot slot : distribute.getOutput()) { projectionExprs.add(ExpressionTranslator.translate(consumer.getProducerSlot(slot), context)); } TupleDescriptor projectionTuple = generateTupleDesc(distribute.getOutput(), null, context); dataStreamSink.setProjections(projectionExprs); dataStreamSink.setOutputTupleDesc(projectionTuple); } } DataPartition dataPartition = toDataPartition(distribute.getDistributionSpec(), validOutputIds, context); exchangeNode.setPartitionType(dataPartition.getType()); exchangeNode.setChildrenDistributeExprLists(distributeExprLists); PlanFragment parentFragment = new PlanFragment(context.nextFragmentId(), exchangeNode, dataPartition); if (distribute.getDistributionSpec() instanceof DistributionSpecGather) { exchangeNode.setNumInstances(1); } else { exchangeNode.setNumInstances(inputFragment.getPlanRoot().getNumInstances()); } if (inputFragment instanceof MultiCastPlanFragment) { MultiCastDataSink multiCastDataSink = (MultiCastDataSink) inputFragment.getSink(); DataStreamSink dataStreamSink = multiCastDataSink.getDataStreamSinks().get( multiCastDataSink.getDataStreamSinks().size() - 1); TupleDescriptor tupleDescriptor = generateTupleDesc(distribute.getOutput(), null, context); exchangeNode.updateTupleIds(tupleDescriptor); dataStreamSink.setExchNodeId(exchangeNode.getId()); dataStreamSink.setOutputPartition(dataPartition); parentFragment.addChild(inputFragment); ((MultiCastPlanFragment) inputFragment).addToDest(exchangeNode); CTEScanNode cteScanNode = context.getCteScanNodeMap().get(inputFragment.getFragmentId()); Preconditions.checkState(cteScanNode != null, "cte scan node is null"); 
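// Wire the CTE consumer's scan node (registered per producer-fragment id in
// visitPhysicalCTEConsumer) into this exchange: attach it to the input fragment, reuse
// the exchange's plan node id, and route its runtime filters to the multi-cast sink.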
cteScanNode.setFragment(inputFragment); cteScanNode.setPlanNodeId(exchangeNode.getId()); context.getRuntimeTranslator().ifPresent(runtimeFilterTranslator -> runtimeFilterTranslator.getContext().getPlanNodeIdToCTEDataSinkMap() .put(cteScanNode.getId(), dataStreamSink)); } else { inputFragment.setDestination(exchangeNode); inputFragment.setOutputPartition(dataPartition); DataStreamSink streamSink = new DataStreamSink(exchangeNode.getId()); streamSink.setOutputPartition(dataPartition); inputFragment.setSink(streamSink); } context.addPlanFragment(parentFragment); return parentFragment; } /* ******************************************************************************************** * sink Node, in lexicographical order * ******************************************************************************************** */ @Override public PlanFragment visitPhysicalResultSink(PhysicalResultSink<? extends Plan> physicalResultSink, PlanTranslatorContext context) { PlanFragment planFragment = physicalResultSink.child().accept(this, context); TResultSinkType resultSinkType = context.getConnectContext() != null ? context.getConnectContext().getResultSinkType() : null; planFragment.setSink(new ResultSink(planFragment.getPlanRoot().getId(), resultSinkType)); return planFragment; } @Override public PlanFragment visitPhysicalDeferMaterializeResultSink( PhysicalDeferMaterializeResultSink<? extends Plan> sink, PlanTranslatorContext context) { PlanFragment planFragment = visitPhysicalResultSink(sink.getPhysicalResultSink(), context); TFetchOption fetchOption = sink.getOlapTable().generateTwoPhaseReadOption(sink.getSelectedIndexId()); ((ResultSink) planFragment.getSink()).setFetchOption(fetchOption); return planFragment; } @Override public PlanFragment visitPhysicalOlapTableSink(PhysicalOlapTableSink<? extends Plan> olapTableSink, PlanTranslatorContext context) { PlanFragment rootFragment = olapTableSink.child().accept(this, context); rootFragment.setOutputPartition(DataPartition.UNPARTITIONED); HashSet<String> partialUpdateCols = new HashSet<>(); boolean isPartialUpdate = olapTableSink.isPartialUpdate(); if (isPartialUpdate) { for (Column col : olapTableSink.getCols()) { partialUpdateCols.add(col.getName()); } } TupleDescriptor olapTuple = context.generateTupleDesc(); List<Column> targetTableColumns = olapTableSink.getTargetTable().getFullSchema(); for (Column column : targetTableColumns) { if (isPartialUpdate && !partialUpdateCols.contains(column.getName())) { continue; } SlotDescriptor slotDesc = context.addSlotDesc(olapTuple); slotDesc.setIsMaterialized(true); slotDesc.setType(column.getType()); slotDesc.setColumn(column); slotDesc.setIsNullable(column.isAllowNull()); slotDesc.setAutoInc(column.isAutoInc()); } OlapTableSink sink; if (context.getConnectContext().isGroupCommitStreamLoadSql()) { sink = new GroupCommitBlockSink(olapTableSink.getTargetTable(), olapTuple, olapTableSink.getTargetTable().getPartitionIds(), olapTableSink.isSingleReplicaLoad(), context.getSessionVariable().getGroupCommit(), 0); } else { sink = new OlapTableSink( olapTableSink.getTargetTable(), olapTuple, olapTableSink.getPartitionIds().isEmpty() ? null : olapTableSink.getPartitionIds(), olapTableSink.isSingleReplicaLoad() ); } sink.setPartialUpdateInputColumns(isPartialUpdate, partialUpdateCols); rootFragment.setSink(sink); return rootFragment; } @Override public PlanFragment visitPhysicalHiveTableSink(PhysicalHiveTableSink<? 
extends Plan> hiveTableSink, PlanTranslatorContext context) { PlanFragment rootFragment = hiveTableSink.child().accept(this, context); rootFragment.setOutputPartition(DataPartition.UNPARTITIONED); TupleDescriptor hiveTuple = context.generateTupleDesc(); List<Column> targetTableColumns = hiveTableSink.getTargetTable().getFullSchema(); for (Column column : targetTableColumns) { SlotDescriptor slotDesc = context.addSlotDesc(hiveTuple); slotDesc.setIsMaterialized(true); slotDesc.setType(column.getType()); slotDesc.setColumn(column); slotDesc.setIsNullable(column.isAllowNull()); slotDesc.setAutoInc(column.isAutoInc()); } HiveTableSink sink = new HiveTableSink(hiveTableSink.getTargetTable()); rootFragment.setSink(sink); return rootFragment; } @Override public PlanFragment visitPhysicalFileSink(PhysicalFileSink<? extends Plan> fileSink, PlanTranslatorContext context) { PlanFragment sinkFragment = fileSink.child().accept(this, context); OutFileClause outFile = new OutFileClause( fileSink.getFilePath(), fileSink.getFormat(), fileSink.getProperties() ); List<Expr> outputExprs = Lists.newArrayList(); fileSink.getOutput().stream().map(Slot::getExprId) .forEach(exprId -> outputExprs.add(context.findSlotRef(exprId))); sinkFragment.setOutputExprs(outputExprs); List<String> labels = fileSink.getOutput().stream().map(NamedExpression::getName).collect(Collectors.toList()); try { outFile.analyze(null, outputExprs, labels); } catch (Exception e) { throw new AnalysisException(e.getMessage(), e.getCause()); } ResultFileSink resultFileSink = new ResultFileSink(sinkFragment.getPlanRoot().getId(), outFile, (ArrayList<String>) labels); sinkFragment.setSink(resultFileSink); if (fileSink.requestProperties(context.getConnectContext()).equals(PhysicalProperties.GATHER)) { return sinkFragment; } else { TupleDescriptor fileStatusDesc = ResultFileSink.constructFileStatusTupleDesc(context.getDescTable()); ExchangeNode exchangeNode = new ExchangeNode(context.nextPlanNodeId(), sinkFragment.getPlanRoot()); exchangeNode.setPartitionType(TPartitionType.UNPARTITIONED); exchangeNode.setNumInstances(1); TResultSinkType resultSinkType = context.getConnectContext() != null ? 
context.getConnectContext().getResultSinkType() : null; ResultSink resultSink = new ResultSink(exchangeNode.getId(), resultSinkType); PlanFragment topFragment = new PlanFragment(context.nextFragmentId(), exchangeNode, DataPartition.UNPARTITIONED); topFragment.addChild(sinkFragment); topFragment.setSink(resultSink); context.addPlanFragment(topFragment); DataStreamSink streamSink = new DataStreamSink(exchangeNode.getId()); streamSink.setOutputPartition(DataPartition.UNPARTITIONED); resultFileSink.resetByDataStreamSink(streamSink); resultFileSink.setOutputTupleId(fileStatusDesc.getId()); sinkFragment.setDestination(exchangeNode); exchangeNode.resetTupleIds(Lists.newArrayList(fileStatusDesc.getId())); topFragment.resetOutputExprs(fileStatusDesc); return topFragment; } } /* ******************************************************************************************** * scan Node, in lexicographical order * ******************************************************************************************** */ @Override public PlanFragment visitPhysicalFileScan(PhysicalFileScan fileScan, PlanTranslatorContext context) { List<Slot> slots = fileScan.getOutput(); ExternalTable table = fileScan.getTable(); TupleDescriptor tupleDescriptor = generateTupleDesc(slots, table, context); ScanNode scanNode; if (table instanceof HMSExternalTable) { switch (((HMSExternalTable) table).getDlaType()) { case HUDI: scanNode = new HudiScanNode(context.nextPlanNodeId(), tupleDescriptor, false); break; case ICEBERG: scanNode = new IcebergScanNode(context.nextPlanNodeId(), tupleDescriptor, false); break; case HIVE: scanNode = new HiveScanNode(context.nextPlanNodeId(), tupleDescriptor, false); HiveScanNode hiveScanNode = (HiveScanNode) scanNode; hiveScanNode.setSelectedPartitions(fileScan.getSelectedPartitions()); if (fileScan.getTableSample().isPresent()) { hiveScanNode.setTableSample(new TableSample(fileScan.getTableSample().get().isPercent, fileScan.getTableSample().get().sampleValue, fileScan.getTableSample().get().seek)); } break; default: throw new RuntimeException("do not support DLA type " + ((HMSExternalTable) table).getDlaType()); } } else if (table instanceof IcebergExternalTable) { scanNode = new IcebergScanNode(context.nextPlanNodeId(), tupleDescriptor, false); } else if (table instanceof PaimonExternalTable) { scanNode = new PaimonScanNode(context.nextPlanNodeId(), tupleDescriptor, false); } else if (table instanceof TrinoConnectorExternalTable) { scanNode = new TrinoConnectorScanNode(context.nextPlanNodeId(), tupleDescriptor, false); } else if (table instanceof MaxComputeExternalTable) { scanNode = new MaxComputeScanNode(context.nextPlanNodeId(), tupleDescriptor, false); } else { throw new RuntimeException("do not support table type " + table.getType()); } scanNode.setNereidsId(fileScan.getId()); scanNode.addConjuncts(translateToLegacyConjuncts(fileScan.getConjuncts())); scanNode.setPushDownAggNoGrouping(context.getRelationPushAggOp(fileScan.getRelationId())); TableName tableName = new TableName(null, "", ""); TableRef ref = new TableRef(tableName, null, null); BaseTableRef tableRef = new BaseTableRef(ref, table, tableName); tupleDescriptor.setRef(tableRef); if (fileScan.getStats() != null) { scanNode.setCardinality((long) fileScan.getStats().getRowCount()); } Utils.execWithUncheckedException(scanNode::init); context.addScanNode(scanNode); ScanNode finalScanNode = scanNode; context.getRuntimeTranslator().ifPresent( runtimeFilterGenerator -> runtimeFilterGenerator.getContext().getTargetListByScan(fileScan).forEach( 
expr -> runtimeFilterGenerator.translateRuntimeFilterTarget(expr, finalScanNode, context) ) ); Utils.execWithUncheckedException(scanNode::finalizeForNereids); DataPartition dataPartition = DataPartition.RANDOM; PlanFragment planFragment = createPlanFragment(scanNode, dataPartition, fileScan); context.addPlanFragment(planFragment); updateLegacyPlanIdToPhysicalPlan(planFragment.getPlanRoot(), fileScan); return planFragment; } @Override public PlanFragment visitPhysicalEmptyRelation(PhysicalEmptyRelation emptyRelation, PlanTranslatorContext context) { List<Slot> output = emptyRelation.getOutput(); TupleDescriptor tupleDescriptor = generateTupleDesc(output, null, context); for (Slot slot : output) { SlotRef slotRef = context.findSlotRef(slot.getExprId()); slotRef.setLabel(slot.getName()); } ArrayList<TupleId> tupleIds = new ArrayList<>(); tupleIds.add(tupleDescriptor.getId()); EmptySetNode emptySetNode = new EmptySetNode(context.nextPlanNodeId(), tupleIds); emptySetNode.setNereidsId(emptyRelation.getId()); PlanFragment planFragment = createPlanFragment(emptySetNode, DataPartition.UNPARTITIONED, emptyRelation); context.addPlanFragment(planFragment); updateLegacyPlanIdToPhysicalPlan(planFragment.getPlanRoot(), emptyRelation); return planFragment; } @Override public PlanFragment visitPhysicalEsScan(PhysicalEsScan esScan, PlanTranslatorContext context) { List<Slot> slots = esScan.getOutput(); ExternalTable table = esScan.getTable(); TupleDescriptor tupleDescriptor = generateTupleDesc(slots, table, context); EsScanNode esScanNode = new EsScanNode(context.nextPlanNodeId(), tupleDescriptor, true); esScanNode.setNereidsId(esScan.getId()); esScanNode.addConjuncts(translateToLegacyConjuncts(esScan.getConjuncts())); Utils.execWithUncheckedException(esScanNode::init); context.addScanNode(esScanNode); context.getRuntimeTranslator().ifPresent( runtimeFilterGenerator -> runtimeFilterGenerator.getContext().getTargetListByScan(esScan).forEach( expr -> runtimeFilterGenerator.translateRuntimeFilterTarget(expr, esScanNode, context) ) ); Utils.execWithUncheckedException(esScanNode::finalizeForNereids); DataPartition dataPartition = DataPartition.RANDOM; PlanFragment planFragment = new PlanFragment(context.nextFragmentId(), esScanNode, dataPartition); context.addPlanFragment(planFragment); updateLegacyPlanIdToPhysicalPlan(planFragment.getPlanRoot(), esScan); return planFragment; } @Override public PlanFragment visitPhysicalJdbcScan(PhysicalJdbcScan jdbcScan, PlanTranslatorContext context) { List<Slot> slots = jdbcScan.getOutput(); TableIf table = jdbcScan.getTable(); TupleDescriptor tupleDescriptor = generateTupleDesc(slots, table, context); JdbcScanNode jdbcScanNode = new JdbcScanNode(context.nextPlanNodeId(), tupleDescriptor, table instanceof JdbcExternalTable); jdbcScanNode.setNereidsId(jdbcScan.getId()); jdbcScanNode.addConjuncts(translateToLegacyConjuncts(jdbcScan.getConjuncts())); Utils.execWithUncheckedException(jdbcScanNode::init); context.addScanNode(jdbcScanNode); context.getRuntimeTranslator().ifPresent( runtimeFilterGenerator -> runtimeFilterGenerator.getContext().getTargetListByScan(jdbcScan).forEach( expr -> runtimeFilterGenerator.translateRuntimeFilterTarget(expr, jdbcScanNode, context) ) ); Utils.execWithUncheckedException(jdbcScanNode::finalizeForNereids); DataPartition dataPartition = DataPartition.RANDOM; PlanFragment planFragment = new PlanFragment(context.nextFragmentId(), jdbcScanNode, dataPartition); context.addPlanFragment(planFragment); 
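// updateLegacyPlanIdToPhysicalPlan below records the legacy PlanNode -> Nereids plan
// mapping on the StatsErrorEstimator when one is attached, presumably so estimated and
// actual statistics can later be matched up per plan node.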
updateLegacyPlanIdToPhysicalPlan(planFragment.getPlanRoot(), jdbcScan); return planFragment; } @Override public PlanFragment visitPhysicalOdbcScan(PhysicalOdbcScan odbcScan, PlanTranslatorContext context) { List<Slot> slots = odbcScan.getOutput(); TableIf table = odbcScan.getTable(); TupleDescriptor tupleDescriptor = generateTupleDesc(slots, table, context); OdbcScanNode odbcScanNode = new OdbcScanNode(context.nextPlanNodeId(), tupleDescriptor, (OdbcTable) table); odbcScanNode.setNereidsId(odbcScan.getId()); odbcScanNode.addConjuncts(translateToLegacyConjuncts(odbcScan.getConjuncts())); Utils.execWithUncheckedException(odbcScanNode::init); context.addScanNode(odbcScanNode); context.getRuntimeTranslator().ifPresent( runtimeFilterGenerator -> runtimeFilterGenerator.getContext().getTargetListByScan(odbcScan).forEach( expr -> runtimeFilterGenerator.translateRuntimeFilterTarget(expr, odbcScanNode, context) ) ); Utils.execWithUncheckedException(odbcScanNode::finalizeForNereids); DataPartition dataPartition = DataPartition.RANDOM; PlanFragment planFragment = new PlanFragment(context.nextFragmentId(), odbcScanNode, dataPartition); context.addPlanFragment(planFragment); updateLegacyPlanIdToPhysicalPlan(planFragment.getPlanRoot(), odbcScan); return planFragment; } @Override public PlanFragment visitPhysicalOlapScan(PhysicalOlapScan olapScan, PlanTranslatorContext context) { List<Slot> slots = olapScan.getOutput(); OlapTable olapTable = olapScan.getTable(); TupleDescriptor tupleDescriptor = generateTupleDesc(slots, olapTable, context); if (olapScan.getSelectedIndexId() != olapScan.getTable().getBaseIndexId()) { generateTupleDesc(olapScan.getBaseOutputs(), olapTable, context); } OlapScanNode olapScanNode = new OlapScanNode(context.nextPlanNodeId(), tupleDescriptor, "OlapScanNode"); olapScanNode.setNereidsId(olapScan.getId()); if (olapScan.getStats() != null) { if (context.getSessionVariable() != null && context.getSessionVariable().forbidUnknownColStats) { for (int i = 0; i < slots.size(); i++) { SlotReference slot = (SlotReference) slots.get(i); boolean inVisibleCol = slot.getColumn().isPresent() && StatisticConstants.shouldIgnoreCol(olapTable, slot.getColumn().get()); if (olapScan.getStats().findColumnStatistics(slot).isUnKnown() && !isComplexDataType(slot.getDataType()) && !StatisticConstants.isSystemTable(olapTable) && !inVisibleCol) { context.addUnknownStatsColumn(olapScanNode, tupleDescriptor.getSlots().get(i).getId()); } } } } TableName tableName = new TableName(null, "", ""); TableRef ref = new TableRef(tableName, null, null); BaseTableRef tableRef = new BaseTableRef(ref, olapTable, tableName); tupleDescriptor.setRef(tableRef); olapScanNode.setSelectedPartitionIds(olapScan.getSelectedPartitionIds()); olapScanNode.setSampleTabletIds(olapScan.getSelectedTabletIds()); if (olapScan.getTableSample().isPresent()) { olapScanNode.setTableSample(new TableSample(olapScan.getTableSample().get().isPercent, olapScan.getTableSample().get().sampleValue, olapScan.getTableSample().get().seek)); } switch (olapScan.getTable().getKeysType()) { case AGG_KEYS: case UNIQUE_KEYS: case DUP_KEYS: PreAggStatus preAgg = olapScan.getPreAggStatus(); olapScanNode.setSelectedIndexInfo(olapScan.getSelectedIndexId(), preAgg.isOn(), preAgg.getOffReason()); break; default: throw new RuntimeException("Not supported key type: " + olapScan.getTable().getKeysType()); } Utils.execWithUncheckedException(olapScanNode::init); context.addScanNode(olapScanNode); context.getRuntimeTranslator().ifPresent( runtimeFilterTranslator -> 
runtimeFilterTranslator.getContext().getTargetListByScan(olapScan) .forEach(expr -> runtimeFilterTranslator.translateRuntimeFilterTarget( expr, olapScanNode, context) ) ); olapScanNode.setPushDownAggNoGrouping(context.getRelationPushAggOp(olapScan.getRelationId())); if (context.getTopnFilterContext().isTopnFilterTarget(olapScan)) { olapScanNode.setUseTopnOpt(true); context.getTopnFilterContext().addLegacyTarget(olapScan, olapScanNode); } olapScanNode.finalizeForNereids(); DataPartition dataPartition = DataPartition.RANDOM; if (olapScan.getDistributionSpec() instanceof DistributionSpecHash) { DistributionSpecHash distributionSpecHash = (DistributionSpecHash) olapScan.getDistributionSpec(); List<Expr> partitionExprs = distributionSpecHash.getOrderedShuffledColumns().stream() .map(context::findSlotRef).collect(Collectors.toList()); dataPartition = new DataPartition(TPartitionType.HASH_PARTITIONED, partitionExprs); } PlanFragment planFragment = createPlanFragment(olapScanNode, dataPartition, olapScan); context.addPlanFragment(planFragment); updateLegacyPlanIdToPhysicalPlan(planFragment.getPlanRoot(), olapScan); return planFragment; } @Override public PlanFragment visitPhysicalDeferMaterializeOlapScan( PhysicalDeferMaterializeOlapScan deferMaterializeOlapScan, PlanTranslatorContext context) { PlanFragment planFragment = visitPhysicalOlapScan(deferMaterializeOlapScan.getPhysicalOlapScan(), context); OlapScanNode olapScanNode = (OlapScanNode) planFragment.getPlanRoot(); if (context.getTopnFilterContext().isTopnFilterTarget(deferMaterializeOlapScan)) { olapScanNode.setUseTopnOpt(true); context.getTopnFilterContext().addLegacyTarget(deferMaterializeOlapScan, olapScanNode); } TupleDescriptor tupleDescriptor = context.getTupleDesc(olapScanNode.getTupleId()); for (SlotDescriptor slotDescriptor : tupleDescriptor.getSlots()) { if (deferMaterializeOlapScan.getDeferMaterializeSlotIds() .contains(context.findExprId(slotDescriptor.getId()))) { slotDescriptor.setNeedMaterialize(false); } } context.createSlotDesc(tupleDescriptor, deferMaterializeOlapScan.getColumnIdSlot()); return planFragment; } @Override public PlanFragment visitPhysicalOneRowRelation(PhysicalOneRowRelation oneRowRelation, PlanTranslatorContext context) { List<Slot> slots = oneRowRelation.getLogicalProperties().getOutput(); TupleDescriptor oneRowTuple = generateTupleDesc(slots, null, context); List<Expr> legacyExprs = oneRowRelation.getProjects() .stream() .map(expr -> ExpressionTranslator.translate(expr, context)) .collect(Collectors.toList()); for (int i = 0; i < legacyExprs.size(); i++) { SlotDescriptor slotDescriptor = oneRowTuple.getSlots().get(i); Expr expr = legacyExprs.get(i); slotDescriptor.setSourceExpr(expr); slotDescriptor.setIsNullable(slots.get(i).nullable()); } UnionNode unionNode = new UnionNode(context.nextPlanNodeId(), oneRowTuple.getId()); unionNode.setNereidsId(oneRowRelation.getId()); unionNode.setCardinality(1L); unionNode.addConstExprList(legacyExprs); unionNode.finalizeForNereids(oneRowTuple.getSlots(), new ArrayList<>()); PlanFragment planFragment = createPlanFragment(unionNode, DataPartition.UNPARTITIONED, oneRowRelation); context.addPlanFragment(planFragment); updateLegacyPlanIdToPhysicalPlan(planFragment.getPlanRoot(), oneRowRelation); return planFragment; } @Override public PlanFragment visitPhysicalSchemaScan(PhysicalSchemaScan schemaScan, PlanTranslatorContext context) { TableIf table = schemaScan.getTable(); List<Slot> slots = ImmutableList.copyOf(schemaScan.getOutput()); TupleDescriptor tupleDescriptor = 
generateTupleDesc(slots, table, context); SchemaScanNode scanNode = null; if (BackendPartitionedSchemaScanNode.isBackendPartitionedSchemaTable( table.getName())) { scanNode = new BackendPartitionedSchemaScanNode(context.nextPlanNodeId(), tupleDescriptor); } else { scanNode = new SchemaScanNode(context.nextPlanNodeId(), tupleDescriptor); } scanNode.setNereidsId(schemaScan.getId()); SchemaScanNode finalScanNode = scanNode; context.getRuntimeTranslator().ifPresent( runtimeFilterGenerator -> runtimeFilterGenerator.getContext().getTargetListByScan(schemaScan) .forEach(expr -> runtimeFilterGenerator .translateRuntimeFilterTarget(expr, finalScanNode, context) ) ); Utils.execWithUncheckedException(scanNode::finalizeForNereids); context.addScanNode(scanNode); PlanFragment planFragment = createPlanFragment(scanNode, DataPartition.RANDOM, schemaScan); context.addPlanFragment(planFragment); updateLegacyPlanIdToPhysicalPlan(planFragment.getPlanRoot(), schemaScan); return planFragment; } @Override public PlanFragment visitPhysicalTVFRelation(PhysicalTVFRelation tvfRelation, PlanTranslatorContext context) { List<Slot> slots = tvfRelation.getLogicalProperties().getOutput(); TupleDescriptor tupleDescriptor = generateTupleDesc(slots, tvfRelation.getFunction().getTable(), context); TableValuedFunctionIf catalogFunction = tvfRelation.getFunction().getCatalogFunction(); ScanNode scanNode = catalogFunction.getScanNode(context.nextPlanNodeId(), tupleDescriptor); scanNode.setNereidsId(tvfRelation.getId()); Utils.execWithUncheckedException(scanNode::init); context.getRuntimeTranslator().ifPresent( runtimeFilterGenerator -> runtimeFilterGenerator.getContext().getTargetListByScan(tvfRelation) .forEach(expr -> runtimeFilterGenerator.translateRuntimeFilterTarget(expr, scanNode, context) ) ); Utils.execWithUncheckedException(scanNode::finalizeForNereids); context.addScanNode(scanNode); for (Slot slot : slots) { String tableColumnName = TableValuedFunctionIf.TVF_TABLE_PREFIX + tvfRelation.getFunction().getName() + "." + slot.getName(); context.findSlotRef(slot.getExprId()).setLabel(tableColumnName); } PlanFragment planFragment = createPlanFragment(scanNode, DataPartition.RANDOM, tvfRelation); context.addPlanFragment(planFragment); updateLegacyPlanIdToPhysicalPlan(planFragment.getPlanRoot(), tvfRelation); return planFragment; } /* ******************************************************************************************** * other Node, in lexicographical order, ignore algorithm name. for example, HashAggregate -> Aggregate * ******************************************************************************************** */ /** * Translate Agg. */ @Override public PlanFragment visitPhysicalHashAggregate( PhysicalHashAggregate<? 
extends Plan> aggregate, PlanTranslatorContext context) { PlanFragment inputPlanFragment = aggregate.child(0).accept(this, context); List<List<Expr>> distributeExprLists = getDistributeExprs(aggregate.child(0)); List<Expression> groupByExpressions = aggregate.getGroupByExpressions(); List<NamedExpression> outputExpressions = aggregate.getOutputExpressions(); List<SlotReference> groupSlots = collectGroupBySlots(groupByExpressions, outputExpressions); ArrayList<Expr> execGroupingExpressions = groupByExpressions.stream() .map(e -> ExpressionTranslator.translate(e, context)) .collect(Collectors.toCollection(ArrayList::new)); List<Slot> aggFunctionOutput = Lists.newArrayList(); List<AggregateExpression> aggregateExpressionList = outputExpressions.stream() .filter(o -> o.anyMatch(AggregateExpression.class::isInstance)) .peek(o -> aggFunctionOutput.add(o.toSlot())) .map(o -> o.<Set<AggregateExpression>>collect(AggregateExpression.class::isInstance)) .flatMap(Set::stream) .collect(Collectors.toList()); ArrayList<FunctionCallExpr> execAggregateFunctions = aggregateExpressionList.stream() .map(aggregateFunction -> (FunctionCallExpr) ExpressionTranslator.translate(aggregateFunction, context)) .collect(Collectors.toCollection(ArrayList::new)); List<Slot> slotList = Lists.newArrayList(); TupleDescriptor outputTupleDesc; slotList.addAll(groupSlots); slotList.addAll(aggFunctionOutput); outputTupleDesc = generateTupleDesc(slotList, null, context); List<Integer> aggFunOutputIds = ImmutableList.of(); if (!aggFunctionOutput.isEmpty()) { aggFunOutputIds = outputTupleDesc .getSlots() .subList(groupSlots.size(), outputTupleDesc.getSlots().size()) .stream() .map(slot -> slot.getId().asInt()) .collect(ImmutableList.toImmutableList()); } boolean isPartial = aggregate.getAggregateParam().aggMode.productAggregateBuffer; AggregateInfo aggInfo = AggregateInfo.create(execGroupingExpressions, execAggregateFunctions, aggFunOutputIds, isPartial, outputTupleDesc, outputTupleDesc, aggregate.getAggPhase().toExec()); AggregationNode aggregationNode = new AggregationNode(context.nextPlanNodeId(), inputPlanFragment.getPlanRoot(), aggInfo); aggregationNode.setChildrenDistributeExprLists(distributeExprLists); aggregationNode.setNereidsId(aggregate.getId()); if (!aggregate.getAggMode().isFinalPhase) { aggregationNode.unsetNeedsFinalize(); } switch (aggregate.getAggPhase()) { case LOCAL: break; case DISTINCT_LOCAL: aggregationNode.setIntermediateTuple(); break; case GLOBAL: case DISTINCT_GLOBAL: break; default: throw new RuntimeException("Unsupported agg phase: " + aggregate.getAggPhase()); } PhysicalHashAggregate firstAggregateInFragment = context.getFirstAggregateInFragment(inputPlanFragment); if (firstAggregateInFragment == null) { context.setFirstAggregateInFragment(inputPlanFragment, aggregate); } PlanNode leftMostNode = inputPlanFragment.getPlanRoot(); while (leftMostNode.getChildren().size() != 0 && !(leftMostNode instanceof ExchangeNode)) { leftMostNode = leftMostNode.getChild(0); } if (leftMostNode instanceof OlapScanNode && inputPlanFragment.getDataPartition().getType() != TPartitionType.RANDOM && aggregate.getAggregateParam().aggMode != AggMode.INPUT_TO_BUFFER) { inputPlanFragment.setHasColocatePlanNode(true); aggregationNode.setColocate(true); } setPlanRoot(inputPlanFragment, aggregationNode, aggregate); if (aggregate.getStats() != null) { aggregationNode.setCardinality((long) aggregate.getStats().getRowCount()); } updateLegacyPlanIdToPhysicalPlan(inputPlanFragment.getPlanRoot(), aggregate); return inputPlanFragment; } 
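// A sketch of the two-phase translation performed above for a query like
//   SELECT k, SUM(v) FROM t GROUP BY k;   (table and column names are hypothetical)
// LOCAL phase (AggMode.INPUT_TO_BUFFER): produces partial buffers per instance;
//   needsFinalize is unset because isFinalPhase is false, and streaming pre-aggregation
//   may be enabled later by visitPhysicalDistribute when the data is reshuffled.
// GLOBAL phase: consumes the exchanged buffers, merges them, and finalizes SUM(v).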
@Override public PlanFragment visitPhysicalStorageLayerAggregate( PhysicalStorageLayerAggregate storageLayerAggregate, PlanTranslatorContext context) { Preconditions.checkState((storageLayerAggregate.getRelation() instanceof PhysicalOlapScan || storageLayerAggregate.getRelation() instanceof PhysicalFileScan), "PhysicalStorageLayerAggregate only support PhysicalOlapScan and PhysicalFileScan: " + storageLayerAggregate.getRelation().getClass().getName()); TPushAggOp pushAggOp; switch (storageLayerAggregate.getAggOp()) { case COUNT: pushAggOp = TPushAggOp.COUNT; break; case COUNT_ON_MATCH: pushAggOp = TPushAggOp.COUNT_ON_INDEX; break; case MIN_MAX: pushAggOp = TPushAggOp.MINMAX; break; case MIX: pushAggOp = TPushAggOp.MIX; break; default: throw new AnalysisException("Unsupported storage layer aggregate: " + storageLayerAggregate.getAggOp()); } context.setRelationPushAggOp( storageLayerAggregate.getRelation().getRelationId(), pushAggOp); PlanFragment planFragment = storageLayerAggregate.getRelation().accept(this, context); updateLegacyPlanIdToPhysicalPlan(planFragment.getPlanRoot(), storageLayerAggregate); return planFragment; } @Override public PlanFragment visitPhysicalAssertNumRows(PhysicalAssertNumRows<? extends Plan> assertNumRows, PlanTranslatorContext context) { PlanFragment currentFragment = assertNumRows.child().accept(this, context); List<List<Expr>> distributeExprLists = getDistributeExprs(assertNumRows.child()); TupleDescriptor tupleDescriptor = context.generateTupleDesc(); AssertNumRowsNode assertNumRowsNode = new AssertNumRowsNode(context.nextPlanNodeId(), currentFragment.getPlanRoot(), ExpressionTranslator.translateAssert(assertNumRows.getAssertNumRowsElement()), true, tupleDescriptor); assertNumRowsNode.setChildrenDistributeExprLists(distributeExprLists); assertNumRowsNode.setNereidsId(assertNumRows.getId()); List<TupleDescriptor> childTuples = context.getTupleDesc(currentFragment.getPlanRoot()); List<SlotDescriptor> childSlotDescriptors = childTuples.stream() .map(TupleDescriptor::getSlots) .flatMap(Collection::stream) .collect(Collectors.toList()); Map<ExprId, SlotReference> childOutputMap = Maps.newHashMap(); assertNumRows.child().getOutput().stream() .map(SlotReference.class::cast) .forEach(s -> childOutputMap.put(s.getExprId(), s)); List<SlotDescriptor> slotDescriptors = Lists.newArrayList(); for (SlotDescriptor slot : childSlotDescriptors) { SlotReference sf = childOutputMap.get(context.findExprId(slot.getId())); SlotDescriptor sd = context.createSlotDesc(tupleDescriptor, sf, slot.getParent().getTable()); slotDescriptors.add(sd); } slotDescriptors.forEach(sd -> sd.setIsNullable(true)); addPlanRoot(currentFragment, assertNumRowsNode, assertNumRows); return currentFragment; } /** * NOTICE: Must translate the left child first, since it is the producer of the consumer. */ @Override public PlanFragment visitPhysicalCTEAnchor(PhysicalCTEAnchor<? extends Plan, ?
extends Plan> cteAnchor, PlanTranslatorContext context) { cteAnchor.child(0).accept(this, context); return cteAnchor.child(1).accept(this, context); } @Override public PlanFragment visitPhysicalCTEConsumer(PhysicalCTEConsumer cteConsumer, PlanTranslatorContext context) { CTEId cteId = cteConsumer.getCteId(); MultiCastPlanFragment multiCastFragment = (MultiCastPlanFragment) context.getCteProduceFragments().get(cteId); Preconditions.checkState(multiCastFragment.getSink() instanceof MultiCastDataSink, "invalid multiCastFragment"); MultiCastDataSink multiCastDataSink = (MultiCastDataSink) multiCastFragment.getSink(); Preconditions.checkState(multiCastDataSink != null, "invalid multiCastDataSink"); PhysicalCTEProducer<?> cteProducer = context.getCteProduceMap().get(cteId); Preconditions.checkState(cteProducer != null, "invalid cteProducer"); context.getCteConsumerMap().put(cteId, cteConsumer); DataStreamSink streamSink = new DataStreamSink(); streamSink.setFragment(multiCastFragment); multiCastDataSink.getDataStreamSinks().add(streamSink); multiCastDataSink.getDestinations().add(Lists.newArrayList()); TupleDescriptor tupleDescriptor = null; for (Slot producerSlot : cteProducer.getOutput()) { Slot consumerSlot = cteConsumer.getProducerToConsumerSlotMap().get(producerSlot); SlotRef slotRef = context.findSlotRef(producerSlot.getExprId()); tupleDescriptor = slotRef.getDesc().getParent(); context.addExprIdSlotRefPair(consumerSlot.getExprId(), slotRef); } CTEScanNode cteScanNode = new CTEScanNode(tupleDescriptor); context.getRuntimeTranslator().ifPresent(runtimeFilterTranslator -> runtimeFilterTranslator.getContext().getTargetListByScan(cteConsumer).forEach( expr -> runtimeFilterTranslator.translateRuntimeFilterTarget(expr, cteScanNode, context))); context.getCteScanNodeMap().put(multiCastFragment.getFragmentId(), cteScanNode); return multiCastFragment; } @Override public PlanFragment visitPhysicalCTEProducer(PhysicalCTEProducer<? extends Plan> cteProducer, PlanTranslatorContext context) { PlanFragment child = cteProducer.child().accept(this, context); CTEId cteId = cteProducer.getCteId(); context.getPlanFragments().remove(child); MultiCastPlanFragment multiCastPlanFragment = new MultiCastPlanFragment(child); MultiCastDataSink multiCastDataSink = new MultiCastDataSink(); multiCastPlanFragment.setSink(multiCastDataSink); List<Expr> outputs = cteProducer.getOutput().stream() .map(e -> ExpressionTranslator.translate(e, context)) .collect(Collectors.toList()); multiCastPlanFragment.setOutputExprs(outputs); context.getCteProduceFragments().put(cteId, multiCastPlanFragment); context.getCteProduceMap().put(cteId, cteProducer); if (context.getRuntimeTranslator().isPresent()) { context.getRuntimeTranslator().get().getContext().getCteProduceMap().put(cteId, cteProducer); } context.getPlanFragments().add(multiCastPlanFragment); return child; } @Override public PlanFragment visitPhysicalFilter(PhysicalFilter<? 
extends Plan> filter, PlanTranslatorContext context) { if (filter.child(0) instanceof AbstractPhysicalJoin) { AbstractPhysicalJoin<?, ?> join = (AbstractPhysicalJoin<?, ?>) filter.child(); join.addFilterConjuncts(filter.getConjuncts()); } PlanFragment inputFragment = filter.child(0).accept(this, context); if (inputFragment instanceof MultiCastPlanFragment) { MultiCastDataSink multiCastDataSink = (MultiCastDataSink) inputFragment.getSink(); DataStreamSink dataStreamSink = multiCastDataSink.getDataStreamSinks().get( multiCastDataSink.getDataStreamSinks().size() - 1); filter.getConjuncts().stream() .map(e -> ExpressionTranslator.translate(e, context)) .forEach(dataStreamSink::addConjunct); return inputFragment; } PlanNode planNode = inputFragment.getPlanRoot(); Plan child = filter.child(); while (child instanceof PhysicalLimit) { child = ((PhysicalLimit<?>) child).child(); } if (planNode instanceof ExchangeNode || planNode instanceof SortNode || planNode instanceof UnionNode || (child instanceof PhysicalProject && !((PhysicalProject<?>) child).hasPushedDownToProjectionFunctions())) { SelectNode selectNode = new SelectNode(context.nextPlanNodeId(), planNode); selectNode.setNereidsId(filter.getId()); addConjunctsToPlanNode(filter, selectNode, context); addPlanRoot(inputFragment, selectNode, filter); } else { if (!(filter.child(0) instanceof AbstractPhysicalJoin)) { addConjunctsToPlanNode(filter, planNode, context); updateLegacyPlanIdToPhysicalPlan(inputFragment.getPlanRoot(), filter); } } if (filter.getStats() != null) { inputFragment.getPlanRoot().setCardinalityAfterFilter((long) filter.getStats().getRowCount()); } return inputFragment; } @Override public PlanFragment visitPhysicalGenerate(PhysicalGenerate<? extends Plan> generate, PlanTranslatorContext context) { PlanFragment currentFragment = generate.child().accept(this, context); ArrayList<Expr> functionCalls = generate.getGenerators().stream() .map(e -> ExpressionTranslator.translate(e, context)) .collect(Collectors.toCollection(ArrayList::new)); TupleDescriptor tupleDescriptor = generateTupleDesc(generate.getGeneratorOutput(), null, context); List<TupleId> childOutputTupleIds = currentFragment.getPlanRoot().getOutputTupleIds(); if (childOutputTupleIds == null || childOutputTupleIds.isEmpty()) { childOutputTupleIds = currentFragment.getPlanRoot().getTupleIds(); } List<SlotId> outputSlotIds = Stream.concat(childOutputTupleIds.stream(), Stream.of(tupleDescriptor.getId())) .map(id -> context.getTupleDesc(id).getSlots()) .flatMap(List::stream) .map(SlotDescriptor::getId) .collect(Collectors.toList()); TableFunctionNode tableFunctionNode = new TableFunctionNode(context.nextPlanNodeId(), currentFragment.getPlanRoot(), tupleDescriptor.getId(), functionCalls, outputSlotIds); tableFunctionNode.setNereidsId(generate.getId()); addPlanRoot(currentFragment, tableFunctionNode, generate); return currentFragment; } /** * The contract of the hash join node with BE * 1. hash join contains 3 types of predicates: * a. equal join conjuncts * b. other join conjuncts * c. other predicates (denoted by filter conjuncts in the rest of the comments) * <p> * 2. hash join contains 3 tuple descriptors * a. input tuple descriptors, corresponding to the left child output and right child output. * If its column is selected, it will be displayed in explain by `tuple ids`. * for example, in select L.* from L join R on ..., because no columns from R are selected, tuple ids only * contains the output tuple of L. * equal join conjuncts are bound on the input tuple descriptors.
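* <p> * For example (editorial illustration, not part of the original comment): in select L.* from L join R on L.id = R.id and L.v < R.v where L.w > 0, L.id = R.id is an equal join conjunct, L.v < R.v is an other join conjunct, and L.w > 0, if it is evaluated at the join, is a filter conjunct.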
* <p> * b. intermediate tuple. * This tuple describes the schema of the output block after evaluating equal join conjuncts * and other join conjuncts. * <p> * Other join conjuncts are currently bound on the intermediate tuple. There are some historical reasons, and they * should be bound on the input tuple in the future. * <p> * filter conjuncts will be evaluated on the intermediate tuple. That means the input block of the filter is * described by the intermediate tuple, and hence filter conjuncts should be bound on the intermediate tuple. * <p> * In order to be compatible with the old version, the intermediate tuple is not pruned. For example, the intermediate * tuple contains all slots from both sides of children. After probing the hash-table, BE does not need to * materialize all slots in the intermediate tuple. The slots in HashJoinNode.hashOutputSlotIds will be * materialized by BE. If `hashOutputSlotIds` is empty, all slots will be materialized. * <p> * In case of outer join, the slots in the intermediate tuple should be set nullable. * For example, * select L.*, R.* from L left outer join R on ... * All slots from R in the intermediate tuple should be nullable. * <p> * c. output tuple * This describes the schema of the hash join output block. * 3. Intermediate tuple * for BE performance reasons, the slots in the intermediate tuple * depend on the join type and other join conjuncts. * In general, the intermediate tuple contains all slots of both children, except one case. * For left-semi/left-anti (right-semi/right-anti) join without other join conjuncts, the intermediate tuple * only contains the left (right) child's output slots. */ @Override public PlanFragment visitPhysicalHashJoin( PhysicalHashJoin<? extends Plan, ? extends Plan> hashJoin, PlanTranslatorContext context) { Preconditions.checkArgument(hashJoin.left() instanceof PhysicalPlan, "HashJoin's left child should be PhysicalPlan"); Preconditions.checkArgument(hashJoin.right() instanceof PhysicalPlan, "HashJoin's right child should be PhysicalPlan"); PhysicalHashJoin<PhysicalPlan, PhysicalPlan> physicalHashJoin = (PhysicalHashJoin<PhysicalPlan, PhysicalPlan>) hashJoin; PlanFragment rightFragment = hashJoin.child(1).accept(this, context); PlanFragment leftFragment = hashJoin.child(0).accept(this, context); List<List<Expr>> distributeExprLists = getDistributeExprs(physicalHashJoin.left(), physicalHashJoin.right()); if (JoinUtils.shouldNestedLoopJoin(hashJoin)) { throw new RuntimeException("Physical hash join could not execute without equal join condition."); } PlanNode leftPlanRoot = leftFragment.getPlanRoot(); PlanNode rightPlanRoot = rightFragment.getPlanRoot(); JoinType joinType = hashJoin.getJoinType(); List<Expr> execEqConjuncts = hashJoin.getHashJoinConjuncts().stream() .map(EqualPredicate.class::cast) .map(e -> JoinUtils.swapEqualToForChildrenOrder(e, hashJoin.left().getOutputSet())) .map(e -> ExpressionTranslator.translate(e, context)) .collect(Collectors.toList()); List<Expr> markConjuncts = ImmutableList.of(); boolean isHashJoinConjunctsEmpty = hashJoin.getHashJoinConjuncts().isEmpty(); boolean isMarkJoinConjunctsEmpty = hashJoin.getMarkJoinConjuncts().isEmpty(); JoinOperator joinOperator = JoinType.toJoinOperator(joinType); if (isHashJoinConjunctsEmpty) { Preconditions.checkState(!isMarkJoinConjunctsEmpty, "mark join conjuncts should not be empty."); markConjuncts = hashJoin.getMarkJoinConjuncts().stream() .map(EqualPredicate.class::cast) .map(e -> JoinUtils.swapEqualToForChildrenOrder(e, hashJoin.left().getOutputSet())) .map(e -> ExpressionTranslator.translate(e, context))
.collect(Collectors.toList()); if (joinOperator == JoinOperator.LEFT_ANTI_JOIN) { joinOperator = JoinOperator.NULL_AWARE_LEFT_ANTI_JOIN; } else if (joinOperator == JoinOperator.LEFT_SEMI_JOIN) { joinOperator = JoinOperator.NULL_AWARE_LEFT_SEMI_JOIN; } } HashJoinNode hashJoinNode = new HashJoinNode(context.nextPlanNodeId(), leftPlanRoot, rightPlanRoot, joinOperator, execEqConjuncts, Lists.newArrayList(), markConjuncts, null, null, null, hashJoin.isMarkJoin()); hashJoinNode.setNereidsId(hashJoin.getId()); hashJoinNode.setChildrenDistributeExprLists(distributeExprLists); hashJoinNode.setUseSpecificProjections(false); PlanFragment currentFragment = connectJoinNode(hashJoinNode, leftFragment, rightFragment, context, hashJoin); if (JoinUtils.shouldColocateJoin(physicalHashJoin)) { hashJoinNode.setColocate(true, ""); leftFragment.setHasColocatePlanNode(true); } else if (JoinUtils.shouldBroadcastJoin(physicalHashJoin)) { Preconditions.checkState(rightPlanRoot instanceof ExchangeNode, "right child of broadcast join must be ExchangeNode but it is " + rightFragment.getPlanRoot()); Preconditions.checkState(rightFragment.getChildren().size() == 1, "right child of broadcast join must have 1 child, but meet " + rightFragment.getChildren().size()); ((ExchangeNode) rightPlanRoot).setRightChildOfBroadcastHashJoin(true); hashJoinNode.setDistributionMode(DistributionMode.BROADCAST); } else if (JoinUtils.shouldBucketShuffleJoin(physicalHashJoin)) { hashJoinNode.setDistributionMode(DistributionMode.BUCKET_SHUFFLE); } else { hashJoinNode.setDistributionMode(DistributionMode.PARTITIONED); } List<TupleDescriptor> leftTuples = context.getTupleDesc(leftPlanRoot); List<SlotDescriptor> leftSlotDescriptors = leftTuples.stream() .map(TupleDescriptor::getSlots) .flatMap(Collection::stream) .collect(Collectors.toList()); List<TupleDescriptor> rightTuples = context.getTupleDesc(rightPlanRoot); List<SlotDescriptor> rightSlotDescriptors = rightTuples.stream() .map(TupleDescriptor::getSlots) .flatMap(Collection::stream) .collect(Collectors.toList()); Map<ExprId, SlotReference> outputSlotReferenceMap = Maps.newHashMap(); hashJoin.getOutput().stream() .map(SlotReference.class::cast) .forEach(s -> outputSlotReferenceMap.put(s.getExprId(), s)); List<SlotReference> outputSlotReferences = Stream.concat(leftTuples.stream(), rightTuples.stream()) .map(TupleDescriptor::getSlots) .flatMap(Collection::stream) .map(sd -> context.findExprId(sd.getId())) .map(outputSlotReferenceMap::get) .filter(Objects::nonNull) .collect(Collectors.toList()); Map<ExprId, SlotReference> hashOutputSlotReferenceMap = Maps.newHashMap(outputSlotReferenceMap); hashJoin.getOtherJoinConjuncts() .stream() .filter(e -> !(e.equals(BooleanLiteral.TRUE))) .flatMap(e -> e.getInputSlots().stream()) .map(SlotReference.class::cast) .forEach(s -> hashOutputSlotReferenceMap.put(s.getExprId(), s)); if (!isHashJoinConjunctsEmpty && !isMarkJoinConjunctsEmpty) { hashJoin.getMarkJoinConjuncts() .stream() .flatMap(e -> e.getInputSlots().stream()) .map(SlotReference.class::cast) .forEach(s -> hashOutputSlotReferenceMap.put(s.getExprId(), s)); } hashJoin.getFilterConjuncts().stream() .filter(e -> !(e.equals(BooleanLiteral.TRUE))) .flatMap(e -> e.getInputSlots().stream()) .map(SlotReference.class::cast) .forEach(s -> hashOutputSlotReferenceMap.put(s.getExprId(), s)); Map<ExprId, SlotReference> leftChildOutputMap = Maps.newHashMap(); hashJoin.child(0).getOutput().stream() .map(SlotReference.class::cast) .forEach(s -> leftChildOutputMap.put(s.getExprId(), s)); Map<ExprId, 
SlotReference> rightChildOutputMap = Maps.newHashMap(); hashJoin.child(1).getOutput().stream() .map(SlotReference.class::cast) .forEach(s -> rightChildOutputMap.put(s.getExprId(), s)); context.getRuntimeTranslator().ifPresent(runtimeFilterTranslator -> physicalHashJoin.getRuntimeFilters() .forEach(filter -> runtimeFilterTranslator.createLegacyRuntimeFilter(filter, hashJoinNode, context))); List<SlotDescriptor> leftIntermediateSlotDescriptor = Lists.newArrayList(); List<SlotDescriptor> rightIntermediateSlotDescriptor = Lists.newArrayList(); TupleDescriptor intermediateDescriptor = context.generateTupleDesc(); if (hashJoin.getOtherJoinConjuncts().isEmpty() && (isHashJoinConjunctsEmpty != isMarkJoinConjunctsEmpty) && (joinType == JoinType.LEFT_ANTI_JOIN || joinType == JoinType.LEFT_SEMI_JOIN || joinType == JoinType.NULL_AWARE_LEFT_ANTI_JOIN)) { for (SlotDescriptor leftSlotDescriptor : leftSlotDescriptors) { if (!leftSlotDescriptor.isMaterialized()) { continue; } SlotReference sf = leftChildOutputMap.get(context.findExprId(leftSlotDescriptor.getId())); SlotDescriptor sd; if (sf == null && leftSlotDescriptor.getColumn().getName().equals(Column.ROWID_COL)) { sd = context.getDescTable().copySlotDescriptor(intermediateDescriptor, leftSlotDescriptor); } else { sd = context.createSlotDesc(intermediateDescriptor, sf, leftSlotDescriptor.getParent().getTable()); if (hashOutputSlotReferenceMap.get(sf.getExprId()) != null) { hashJoinNode.addSlotIdToHashOutputSlotIds(leftSlotDescriptor.getId()); hashJoinNode.getHashOutputExprSlotIdMap().put(sf.getExprId(), leftSlotDescriptor.getId()); } } leftIntermediateSlotDescriptor.add(sd); } } else if (hashJoin.getOtherJoinConjuncts().isEmpty() && (isHashJoinConjunctsEmpty != isMarkJoinConjunctsEmpty) && (joinType == JoinType.RIGHT_ANTI_JOIN || joinType == JoinType.RIGHT_SEMI_JOIN)) { for (SlotDescriptor rightSlotDescriptor : rightSlotDescriptors) { if (!rightSlotDescriptor.isMaterialized()) { continue; } SlotReference sf = rightChildOutputMap.get(context.findExprId(rightSlotDescriptor.getId())); SlotDescriptor sd; if (sf == null && rightSlotDescriptor.getColumn().getName().equals(Column.ROWID_COL)) { sd = context.getDescTable().copySlotDescriptor(intermediateDescriptor, rightSlotDescriptor); } else { sd = context.createSlotDesc(intermediateDescriptor, sf, rightSlotDescriptor.getParent().getTable()); if (hashOutputSlotReferenceMap.get(sf.getExprId()) != null) { hashJoinNode.addSlotIdToHashOutputSlotIds(rightSlotDescriptor.getId()); hashJoinNode.getHashOutputExprSlotIdMap().put(sf.getExprId(), rightSlotDescriptor.getId()); } } rightIntermediateSlotDescriptor.add(sd); } } else { for (SlotDescriptor leftSlotDescriptor : leftSlotDescriptors) { if (!leftSlotDescriptor.isMaterialized()) { continue; } SlotReference sf = leftChildOutputMap.get(context.findExprId(leftSlotDescriptor.getId())); SlotDescriptor sd; if (sf == null && leftSlotDescriptor.getColumn().getName().equals(Column.ROWID_COL)) { sd = context.getDescTable().copySlotDescriptor(intermediateDescriptor, leftSlotDescriptor); } else { sd = context.createSlotDesc(intermediateDescriptor, sf, leftSlotDescriptor.getParent().getTable()); if (hashOutputSlotReferenceMap.get(sf.getExprId()) != null) { hashJoinNode.addSlotIdToHashOutputSlotIds(leftSlotDescriptor.getId()); hashJoinNode.getHashOutputExprSlotIdMap().put(sf.getExprId(), leftSlotDescriptor.getId()); } } leftIntermediateSlotDescriptor.add(sd); } for (SlotDescriptor rightSlotDescriptor : rightSlotDescriptors) { if (!rightSlotDescriptor.isMaterialized()) { 
continue; } SlotReference sf = rightChildOutputMap.get(context.findExprId(rightSlotDescriptor.getId())); SlotDescriptor sd; if (sf == null && rightSlotDescriptor.getColumn().getName().equals(Column.ROWID_COL)) { sd = context.getDescTable().copySlotDescriptor(intermediateDescriptor, rightSlotDescriptor); } else { sd = context.createSlotDesc(intermediateDescriptor, sf, rightSlotDescriptor.getParent().getTable()); if (hashOutputSlotReferenceMap.get(sf.getExprId()) != null) { hashJoinNode.addSlotIdToHashOutputSlotIds(rightSlotDescriptor.getId()); hashJoinNode.getHashOutputExprSlotIdMap().put(sf.getExprId(), rightSlotDescriptor.getId()); } } rightIntermediateSlotDescriptor.add(sd); } } if (hashJoin.getMarkJoinSlotReference().isPresent()) { SlotReference sf = hashJoin.getMarkJoinSlotReference().get(); outputSlotReferences.add(sf); context.createSlotDesc(intermediateDescriptor, sf); if (hashOutputSlotReferenceMap.get(sf.getExprId()) != null) { SlotRef markJoinSlotId = context.findSlotRef(sf.getExprId()); Preconditions.checkState(markJoinSlotId != null); hashJoinNode.addSlotIdToHashOutputSlotIds(markJoinSlotId.getSlotId()); hashJoinNode.getHashOutputExprSlotIdMap().put(sf.getExprId(), markJoinSlotId.getSlotId()); } } if (joinType == JoinType.LEFT_OUTER_JOIN || joinType == JoinType.FULL_OUTER_JOIN) { rightIntermediateSlotDescriptor.forEach(sd -> sd.setIsNullable(true)); } if (joinType == JoinType.RIGHT_OUTER_JOIN || joinType == JoinType.FULL_OUTER_JOIN) { leftIntermediateSlotDescriptor.forEach(sd -> sd.setIsNullable(true)); } List<Expr> otherJoinConjuncts = hashJoin.getOtherJoinConjuncts() .stream() .filter(e -> !(e.equals(BooleanLiteral.TRUE))) .map(e -> ExpressionTranslator.translate(e, context)) .collect(Collectors.toList()); hashJoin.getFilterConjuncts().stream() .filter(e -> !(e.equals(BooleanLiteral.TRUE))) .map(e -> ExpressionTranslator.translate(e, context)) .forEach(hashJoinNode::addConjunct); hashJoinNode.setOtherJoinConjuncts(otherJoinConjuncts); if (!isHashJoinConjunctsEmpty && !isMarkJoinConjunctsEmpty) { List<Expr> markJoinConjuncts = hashJoin.getMarkJoinConjuncts() .stream() .map(e -> ExpressionTranslator.translate(e, context)) .collect(Collectors.toList()); hashJoinNode.setMarkJoinConjuncts(markJoinConjuncts); } hashJoinNode.setvIntermediateTupleDescList(Lists.newArrayList(intermediateDescriptor)); if (hashJoin.isShouldTranslateOutput()) { List<Expr> srcToOutput = outputSlotReferences.stream() .map(e -> ExpressionTranslator.translate(e, context)) .collect(Collectors.toList()); TupleDescriptor outputDescriptor = context.generateTupleDesc(); outputSlotReferences.forEach(s -> context.createSlotDesc(outputDescriptor, s)); hashJoinNode.setOutputTupleDesc(outputDescriptor); hashJoinNode.setProjectList(srcToOutput); } if (hashJoin.getStats() != null) { hashJoinNode.setCardinality((long) hashJoin.getStats().getRowCount()); } updateLegacyPlanIdToPhysicalPlan(currentFragment.getPlanRoot(), hashJoin); return currentFragment; } @Override public PlanFragment visitPhysicalNestedLoopJoin( PhysicalNestedLoopJoin<? extends Plan, ? 
extends Plan> nestedLoopJoin, PlanTranslatorContext context) { PlanFragment rightFragment = nestedLoopJoin.child(1).accept(this, context); PlanFragment leftFragment = nestedLoopJoin.child(0).accept(this, context); List<List<Expr>> distributeExprLists = getDistributeExprs(nestedLoopJoin.child(0), nestedLoopJoin.child(1)); PlanNode leftFragmentPlanRoot = leftFragment.getPlanRoot(); PlanNode rightFragmentPlanRoot = rightFragment.getPlanRoot(); if (JoinUtils.shouldNestedLoopJoin(nestedLoopJoin)) { List<TupleDescriptor> leftTuples = context.getTupleDesc(leftFragmentPlanRoot); List<TupleDescriptor> rightTuples = context.getTupleDesc(rightFragmentPlanRoot); List<TupleId> tupleIds = Stream.concat(leftTuples.stream(), rightTuples.stream()) .map(TupleDescriptor::getId) .collect(Collectors.toList()); JoinType joinType = nestedLoopJoin.getJoinType(); NestedLoopJoinNode nestedLoopJoinNode = new NestedLoopJoinNode(context.nextPlanNodeId(), leftFragmentPlanRoot, rightFragmentPlanRoot, tupleIds, JoinType.toJoinOperator(joinType), null, null, null, nestedLoopJoin.isMarkJoin()); nestedLoopJoinNode.setUseSpecificProjections(false); nestedLoopJoinNode.setNereidsId(nestedLoopJoin.getId()); nestedLoopJoinNode.setChildrenDistributeExprLists(distributeExprLists); if (nestedLoopJoin.getStats() != null) { nestedLoopJoinNode.setCardinality((long) nestedLoopJoin.getStats().getRowCount()); } nestedLoopJoinNode.setChild(0, leftFragment.getPlanRoot()); nestedLoopJoinNode.setChild(1, rightFragment.getPlanRoot()); setPlanRoot(leftFragment, nestedLoopJoinNode, nestedLoopJoin); rightFragment.getPlanRoot().setCompactData(false); context.mergePlanFragment(rightFragment, leftFragment); for (PlanFragment rightChild : rightFragment.getChildren()) { leftFragment.addChild(rightChild); } context.getRuntimeTranslator().ifPresent(runtimeFilterTranslator -> { List<RuntimeFilter> filters = nestedLoopJoin.getRuntimeFilters(); filters.forEach(filter -> runtimeFilterTranslator .createLegacyRuntimeFilter(filter, nestedLoopJoinNode, context)); if (filters.stream().anyMatch(filter -> filter.getType() == TRuntimeFilterType.BITMAP)) { nestedLoopJoinNode.setOutputLeftSideOnly(true); } }); Map<ExprId, SlotReference> leftChildOutputMap = Maps.newHashMap(); nestedLoopJoin.child(0).getOutput().stream() .map(SlotReference.class::cast) .forEach(s -> leftChildOutputMap.put(s.getExprId(), s)); Map<ExprId, SlotReference> rightChildOutputMap = Maps.newHashMap(); nestedLoopJoin.child(1).getOutput().stream() .map(SlotReference.class::cast) .forEach(s -> rightChildOutputMap.put(s.getExprId(), s)); List<SlotDescriptor> leftIntermediateSlotDescriptor = Lists.newArrayList(); List<SlotDescriptor> rightIntermediateSlotDescriptor = Lists.newArrayList(); TupleDescriptor intermediateDescriptor = context.generateTupleDesc(); List<SlotDescriptor> leftSlotDescriptors = leftTuples.stream() .map(TupleDescriptor::getSlots) .flatMap(Collection::stream) .collect(Collectors.toList()); List<SlotDescriptor> rightSlotDescriptors = rightTuples.stream() .map(TupleDescriptor::getSlots) .flatMap(Collection::stream) .collect(Collectors.toList()); Map<ExprId, SlotReference> outputSlotReferenceMap = Maps.newHashMap(); nestedLoopJoin.getOutput().stream() .map(SlotReference.class::cast) .forEach(s -> outputSlotReferenceMap.put(s.getExprId(), s)); nestedLoopJoin.getFilterConjuncts().stream() .filter(e -> !(e.equals(BooleanLiteral.TRUE))) .flatMap(e -> e.getInputSlots().stream()) .map(SlotReference.class::cast) .forEach(s -> outputSlotReferenceMap.put(s.getExprId(), s)); 
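// Editorial note: outputSlotReferenceMap now holds both the join's output slots and the slots referenced only by filter conjuncts, so the intermediate tuple assembled below keeps everything the filter evaluation needs.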
List<SlotReference> outputSlotReferences = Stream.concat(leftTuples.stream(), rightTuples.stream()) .map(TupleDescriptor::getSlots) .flatMap(Collection::stream) .map(sd -> context.findExprId(sd.getId())) .map(outputSlotReferenceMap::get) .filter(Objects::nonNull) .collect(Collectors.toList()); for (SlotDescriptor leftSlotDescriptor : leftSlotDescriptors) { if (!leftSlotDescriptor.isMaterialized()) { continue; } SlotReference sf = leftChildOutputMap.get(context.findExprId(leftSlotDescriptor.getId())); SlotDescriptor sd; if (sf == null && leftSlotDescriptor.getColumn().getName().equals(Column.ROWID_COL)) { sd = context.getDescTable().copySlotDescriptor(intermediateDescriptor, leftSlotDescriptor); } else { sd = context.createSlotDesc(intermediateDescriptor, sf, leftSlotDescriptor.getParent().getTable()); } leftIntermediateSlotDescriptor.add(sd); } for (SlotDescriptor rightSlotDescriptor : rightSlotDescriptors) { if (!rightSlotDescriptor.isMaterialized()) { continue; } SlotReference sf = rightChildOutputMap.get(context.findExprId(rightSlotDescriptor.getId())); SlotDescriptor sd; if (sf == null && rightSlotDescriptor.getColumn().getName().equals(Column.ROWID_COL)) { sd = context.getDescTable().copySlotDescriptor(intermediateDescriptor, rightSlotDescriptor); } else { sd = context.createSlotDesc(intermediateDescriptor, sf, rightSlotDescriptor.getParent().getTable()); } rightIntermediateSlotDescriptor.add(sd); } if (nestedLoopJoin.getMarkJoinSlotReference().isPresent()) { outputSlotReferences.add(nestedLoopJoin.getMarkJoinSlotReference().get()); context.createSlotDesc(intermediateDescriptor, nestedLoopJoin.getMarkJoinSlotReference().get()); } if (joinType == JoinType.LEFT_OUTER_JOIN || joinType == JoinType.FULL_OUTER_JOIN) { rightIntermediateSlotDescriptor.forEach(sd -> sd.setIsNullable(true)); } if (joinType == JoinType.RIGHT_OUTER_JOIN || joinType == JoinType.FULL_OUTER_JOIN) { leftIntermediateSlotDescriptor.forEach(sd -> sd.setIsNullable(true)); } nestedLoopJoinNode.setvIntermediateTupleDescList(Lists.newArrayList(intermediateDescriptor)); List<Expr> joinConjuncts = nestedLoopJoin.getOtherJoinConjuncts().stream() .filter(e -> !nestedLoopJoin.isBitmapRuntimeFilterCondition(e)) .map(e -> ExpressionTranslator.translate(e, context)).collect(Collectors.toList()); if (!nestedLoopJoin.isBitMapRuntimeFilterConditionsEmpty() && joinConjuncts.isEmpty()) { joinConjuncts.add(new BoolLiteral(true)); } nestedLoopJoinNode.setJoinConjuncts(joinConjuncts); if (!nestedLoopJoin.getOtherJoinConjuncts().isEmpty()) { List<Expr> markJoinConjuncts = nestedLoopJoin.getMarkJoinConjuncts().stream() .map(e -> ExpressionTranslator.translate(e, context)).collect(Collectors.toList()); nestedLoopJoinNode.setMarkJoinConjuncts(markJoinConjuncts); } nestedLoopJoin.getFilterConjuncts().stream() .filter(e -> !(e.equals(BooleanLiteral.TRUE))) .map(e -> ExpressionTranslator.translate(e, context)) .forEach(nestedLoopJoinNode::addConjunct); if (nestedLoopJoin.isShouldTranslateOutput()) { List<Expr> srcToOutput = outputSlotReferences.stream() .map(e -> ExpressionTranslator.translate(e, context)) .collect(Collectors.toList()); TupleDescriptor outputDescriptor = context.generateTupleDesc(); outputSlotReferences.forEach(s -> context.createSlotDesc(outputDescriptor, s)); nestedLoopJoinNode.setOutputTupleDesc(outputDescriptor); nestedLoopJoinNode.setProjectList(srcToOutput); } if (nestedLoopJoin.getStats() != null) { nestedLoopJoinNode.setCardinality((long) nestedLoopJoin.getStats().getRowCount()); } 
updateLegacyPlanIdToPhysicalPlan(leftFragment.getPlanRoot(), nestedLoopJoin); return leftFragment; } else { throw new RuntimeException("Physical nested loop join could not execute with equal join condition."); } } @Override public PlanFragment visitPhysicalLimit(PhysicalLimit<? extends Plan> physicalLimit, PlanTranslatorContext context) { PlanFragment inputFragment = physicalLimit.child(0).accept(this, context); PlanNode child = inputFragment.getPlanRoot(); child.setLimit(MergeLimits.mergeLimit(physicalLimit.getLimit(), physicalLimit.getOffset(), child.getLimit())); updateLegacyPlanIdToPhysicalPlan(child, physicalLimit); return inputFragment; } @Override public PlanFragment visitPhysicalPartitionTopN(PhysicalPartitionTopN<? extends Plan> partitionTopN, PlanTranslatorContext context) { PlanFragment inputFragment = partitionTopN.child(0).accept(this, context); List<List<Expr>> distributeExprLists = getDistributeExprs(partitionTopN.child(0)); PartitionSortNode partitionSortNode = translatePartitionSortNode( partitionTopN, inputFragment.getPlanRoot(), context); partitionSortNode.setChildrenDistributeExprLists(distributeExprLists); addPlanRoot(inputFragment, partitionSortNode, partitionTopN); if (partitionTopN.getPhase() == PartitionTopnPhase.ONE_PHASE_GLOBAL_PTOPN && findOlapScanNodesByPassExchangeAndJoinNode(inputFragment.getPlanRoot())) { inputFragment.setHasColocatePlanNode(true); } return inputFragment; } private List<Expression> getPushDownToProjectionFunctionForRewritten(NamedExpression expression) { List<Expression> targetExprList = expression.collectToList(PushDownToProjectionFunction.class::isInstance); return targetExprList.stream() .filter(PushDownToProjectionFunction::validToPushDown) .collect(Collectors.toList()); } private void registerRewrittenSlot(PhysicalProject<? extends Plan> project, OlapScanNode olapScanNode) { List<Expression> allPushDownProjectionFunctions = project.getProjects().stream() .map(this::getPushDownToProjectionFunctionForRewritten) .flatMap(List::stream) .collect(Collectors.toList()); for (Expression expr : allPushDownProjectionFunctions) { PushDownToProjectionFunction function = (PushDownToProjectionFunction) expr; if (context != null && context.getConnectContext() != null && context.getConnectContext().getStatementContext() != null) { Slot argumentSlot = function.getInputSlots().stream().findFirst().get(); Expression rewrittenSlot = PushDownToProjectionFunction.rewriteToSlot( function, (SlotReference) argumentSlot); TupleDescriptor tupleDescriptor = context.getTupleDesc(olapScanNode.getTupleId()); context.createSlotDesc(tupleDescriptor, (SlotReference) rewrittenSlot); } } } @Override /** * Returns a new fragment with a UnionNode as its root. The data partition of the * returned fragment and how the data of the child fragments is consumed depends on the * data partitions of the child fragments: * - All child fragments are unpartitioned or partitioned: The returned fragment has an * UNPARTITIONED or RANDOM data partition, respectively. The UnionNode absorbs the * plan trees of all child fragments. * - Mixed partitioned/unpartitioned child fragments: The returned fragment is * RANDOM partitioned. The plan trees of all partitioned child fragments are absorbed * into the UnionNode. All unpartitioned child fragments are connected to the * UnionNode via a RANDOM exchange, and remain unchanged otherwise. 
*/ public PlanFragment visitPhysicalSetOperation( PhysicalSetOperation setOperation, PlanTranslatorContext context) { List<PlanFragment> childrenFragments = new ArrayList<>(); for (Plan plan : setOperation.children()) { childrenFragments.add(plan.accept(this, context)); } TupleDescriptor setTuple = generateTupleDesc(setOperation.getOutput(), null, context); List<SlotDescriptor> outputSlotDescs = new ArrayList<>(setTuple.getSlots()); SetOperationNode setOperationNode; if (setOperation instanceof PhysicalUnion) { setOperationNode = new UnionNode(context.nextPlanNodeId(), setTuple.getId()); } else if (setOperation instanceof PhysicalExcept) { setOperationNode = new ExceptNode(context.nextPlanNodeId(), setTuple.getId()); } else if (setOperation instanceof PhysicalIntersect) { setOperationNode = new IntersectNode(context.nextPlanNodeId(), setTuple.getId()); } else { throw new RuntimeException("unsupported set operation type " + setOperation); } setOperationNode.setNereidsId(setOperation.getId()); setOperation.getRegularChildrenOutputs().stream() .map(o -> o.stream() .map(e -> ExpressionTranslator.translate(e, context)) .collect(ImmutableList.toImmutableList())) .forEach(setOperationNode::addResultExprLists); if (setOperation instanceof PhysicalUnion) { ((PhysicalUnion) setOperation).getConstantExprsList().stream() .map(l -> l.stream() .map(e -> ExpressionTranslator.translate(e, context)) .collect(ImmutableList.toImmutableList())) .forEach(setOperationNode::addConstExprList); } for (PlanFragment childFragment : childrenFragments) { setOperationNode.addChild(childFragment.getPlanRoot()); } setOperationNode.finalizeForNereids(outputSlotDescs, outputSlotDescs); PlanFragment setOperationFragment; if (childrenFragments.isEmpty()) { setOperationFragment = createPlanFragment(setOperationNode, DataPartition.UNPARTITIONED, setOperation); context.addPlanFragment(setOperationFragment); } else { int childrenSize = childrenFragments.size(); setOperationFragment = childrenFragments.get(childrenSize - 1); for (int i = childrenSize - 2; i >= 0; i--) { context.mergePlanFragment(childrenFragments.get(i), setOperationFragment); for (PlanFragment child : childrenFragments.get(i).getChildren()) { setOperationFragment.addChild(child); } } setPlanRoot(setOperationFragment, setOperationNode, setOperation); } if (!setOperation.getPhysicalProperties().equals(PhysicalProperties.ANY) && findOlapScanNodesByPassExchangeAndJoinNode(setOperationFragment.getPlanRoot())) { setOperationFragment.setHasColocatePlanNode(true); setOperationNode.setColocate(true); } return setOperationFragment; } /*- * Physical sort: * 1. Build sortInfo * There are two types of slotRef: * one is generated by the previous node, collectively called old; * the other is newly generated by the sort node, collectively called new. * Filling of the sortInfo related data structures: * a. ordering uses newSlotRef. * b. sortTupleSlotExprs uses oldSlotRef. * 2. Create sortNode * 3. Create mergeFragment * TODO: When the slotRef of sort is currently generated, * it is based on the expressions in the select list and the order by clause to ensure the uniqueness of slotRef. * But e.g.: * select a+1 from table order by a+1; * the expressions of the two are inconsistent. * The former will perform an additional Alias. * Currently we cannot test whether this will have any effect. * After a+1 can be parsed, revisit this. */ @Override public PlanFragment visitPhysicalQuickSort(PhysicalQuickSort<?
extends Plan> sort, PlanTranslatorContext context) { PlanFragment inputFragment = sort.child(0).accept(this, context); List<List<Expr>> distributeExprLists = getDistributeExprs(sort.child(0)); if (!sort.getSortPhase().isMerge()) { SortNode sortNode = translateSortNode(sort, inputFragment.getPlanRoot(), context); sortNode.setChildrenDistributeExprLists(distributeExprLists); addPlanRoot(inputFragment, sortNode, sort); } else { if (!(inputFragment.getPlanRoot() instanceof ExchangeNode)) { return inputFragment; } SortNode sortNode = (SortNode) inputFragment.getPlanRoot().getChild(0); ((ExchangeNode) inputFragment.getPlanRoot()).setMergeInfo(sortNode.getSortInfo()); sortNode.setMergeByExchange(); sortNode.setChildrenDistributeExprLists(distributeExprLists); } return inputFragment; } @Override public PlanFragment visitPhysicalTopN(PhysicalTopN<? extends Plan> topN, PlanTranslatorContext context) { PlanFragment inputFragment = topN.child(0).accept(this, context); List<List<Expr>> distributeExprLists = getDistributeExprs(topN.child(0)); if (!topN.getSortPhase().isMerge()) { SortNode sortNode = translateSortNode(topN, inputFragment.getPlanRoot(), context); sortNode.setOffset(topN.getOffset()); sortNode.setLimit(topN.getLimit()); if (context.getTopnFilterContext().isTopnFilterSource(topN)) { sortNode.setUseTopnOpt(true); context.getTopnFilterContext().getTargets(topN).forEach( olapScan -> { Optional<OlapScanNode> legacyScan = context.getTopnFilterContext().getLegacyScanNode(olapScan); Preconditions.checkState(legacyScan.isPresent(), "cannot find OlapScanNode for topn filter"); legacyScan.get().addTopnFilterSortNode(sortNode); } ); } if (sortNode.getChild(0) instanceof OlapScanNode) { OlapScanNode scanNode = ((OlapScanNode) sortNode.getChild(0)); if (checkPushSort(sortNode, scanNode.getOlapTable())) { SortInfo sortInfo = sortNode.getSortInfo(); scanNode.setSortInfo(sortInfo); scanNode.getSortInfo().setSortTupleSlotExprs(sortNode.getResolvedTupleExprs()); for (Expr expr : sortInfo.getOrderingExprs()) { scanNode.getSortInfo().addMaterializedOrderingExpr(expr); } if (sortNode.getOffset() > 0) { scanNode.setSortLimit(sortNode.getLimit() + sortNode.getOffset()); } else { scanNode.setSortLimit(sortNode.getLimit()); } } } sortNode.setChildrenDistributeExprLists(distributeExprLists); addPlanRoot(inputFragment, sortNode, topN); } else { if (!(inputFragment.getPlanRoot() instanceof ExchangeNode)) { inputFragment.getPlanRoot().setOffset(topN.getOffset()); inputFragment.getPlanRoot().setLimit(topN.getLimit()); return inputFragment; } ExchangeNode exchangeNode = (ExchangeNode) inputFragment.getPlanRoot(); exchangeNode.setChildrenDistributeExprLists(distributeExprLists); exchangeNode.setMergeInfo(((SortNode) exchangeNode.getChild(0)).getSortInfo()); exchangeNode.setLimit(topN.getLimit()); exchangeNode.setOffset(topN.getOffset()); ((SortNode) exchangeNode.getChild(0)).setMergeByExchange(); } updateLegacyPlanIdToPhysicalPlan(inputFragment.getPlanRoot(), topN); return inputFragment; } @Override public PlanFragment visitPhysicalDeferMaterializeTopN(PhysicalDeferMaterializeTopN<? 
extends Plan> topN, PlanTranslatorContext context) { PlanFragment planFragment = visitPhysicalTopN(topN.getPhysicalTopN(), context); if (planFragment.getPlanRoot() instanceof SortNode) { SortNode sortNode = (SortNode) planFragment.getPlanRoot(); sortNode.setUseTwoPhaseReadOpt(true); sortNode.getSortInfo().setUseTwoPhaseRead(); if (context.getTopnFilterContext().isTopnFilterSource(topN)) { sortNode.setUseTopnOpt(true); context.getTopnFilterContext().getTargets(topN).forEach( olapScan -> { Optional<OlapScanNode> legacyScan = context.getTopnFilterContext().getLegacyScanNode(olapScan); Preconditions.checkState(legacyScan.isPresent(), "cannot find OlapScanNode for topn filter"); legacyScan.get().addTopnFilterSortNode(sortNode); } ); } TupleDescriptor tupleDescriptor = sortNode.getSortInfo().getSortTupleDescriptor(); for (SlotDescriptor slotDescriptor : tupleDescriptor.getSlots()) { if (topN.getDeferMaterializeSlotIds() .contains(context.findExprId(slotDescriptor.getId()))) { slotDescriptor.setNeedMaterialize(false); } } } return planFragment; } @Override public PlanFragment visitPhysicalRepeat(PhysicalRepeat<? extends Plan> repeat, PlanTranslatorContext context) { PlanFragment inputPlanFragment = repeat.child(0).accept(this, context); List<List<Expr>> distributeExprLists = getDistributeExprs(repeat.child(0)); Set<VirtualSlotReference> sortedVirtualSlots = repeat.getSortedVirtualSlots(); TupleDescriptor virtualSlotsTuple = generateTupleDesc(ImmutableList.copyOf(sortedVirtualSlots), null, context); ImmutableSet<Expression> flattenGroupingSetExprs = ImmutableSet.copyOf( ExpressionUtils.flatExpressions(repeat.getGroupingSets())); List<Slot> aggregateFunctionUsedSlots = repeat.getOutputExpressions() .stream() .filter(output -> !(output instanceof VirtualSlotReference)) .filter(output -> !flattenGroupingSetExprs.contains(output)) .distinct() .map(NamedExpression::toSlot) .collect(ImmutableList.toImmutableList()); List<Expr> preRepeatExprs = Stream.concat(flattenGroupingSetExprs.stream(), aggregateFunctionUsedSlots.stream()) .map(expr -> ExpressionTranslator.translate(expr, context)).collect(ImmutableList.toImmutableList()); List<Slot> outputSlots = Stream.concat( repeat.getOutputExpressions().stream().filter(output -> flattenGroupingSetExprs.contains(output)), repeat.getOutputExpressions().stream().filter(output -> !flattenGroupingSetExprs.contains(output))) .map(NamedExpression::toSlot).collect(ImmutableList.toImmutableList()); TupleDescriptor outputTuple = generateTupleDesc(outputSlots, null, context); GroupingInfo groupingInfo = new GroupingInfo( GroupingType.GROUPING_SETS, virtualSlotsTuple, outputTuple, preRepeatExprs); List<Set<Integer>> repeatSlotIdList = repeat.computeRepeatSlotIdList(getSlotIds(outputTuple)); Set<Integer> allSlotId = repeatSlotIdList.stream() .flatMap(Set::stream) .collect(ImmutableSet.toImmutableSet()); RepeatNode repeatNode = new RepeatNode(context.nextPlanNodeId(), inputPlanFragment.getPlanRoot(), groupingInfo, repeatSlotIdList, allSlotId, repeat.computeVirtualSlotValues(sortedVirtualSlots)); repeatNode.setNereidsId(repeat.getId()); repeatNode.setChildrenDistributeExprLists(distributeExprLists); addPlanRoot(inputPlanFragment, repeatNode, repeat); updateLegacyPlanIdToPhysicalPlan(inputPlanFragment.getPlanRoot(), repeat); return inputPlanFragment; } @Override public PlanFragment visitPhysicalWindow(PhysicalWindow<? 
extends Plan> physicalWindow, PlanTranslatorContext context) { PlanFragment inputPlanFragment = physicalWindow.child(0).accept(this, context); List<List<Expr>> distributeExprLists = getDistributeExprs(physicalWindow.child(0)); WindowFrameGroup windowFrameGroup = physicalWindow.getWindowFrameGroup(); List<Expression> partitionKeyList = Lists.newArrayList(windowFrameGroup.getPartitionKeys()); List<OrderExpression> orderKeyList = windowFrameGroup.getOrderKeys(); List<NamedExpression> windowFunctionList = windowFrameGroup.getGroups(); WindowFrame windowFrame = windowFrameGroup.getWindowFrame(); List<Expr> partitionExprs = partitionKeyList.stream() .map(e -> ExpressionTranslator.translate(e, context)) .collect(Collectors.toList()); List<OrderByElement> orderByElements = orderKeyList.stream() .map(orderKey -> new OrderByElement( ExpressionTranslator.translate(orderKey.child(), context), orderKey.isAsc(), orderKey.isNullFirst())) .collect(Collectors.toList()); List<Expr> analyticFnCalls = windowFunctionList.stream() .map(e -> { Expression function = e.child(0).child(0); if (function instanceof AggregateFunction) { AggregateParam param = AggregateParam.LOCAL_RESULT; function = new AggregateExpression((AggregateFunction) function, param); } return ExpressionTranslator.translate(function, context); }) .map(FunctionCallExpr.class::cast) .peek(fnCall -> { fnCall.setIsAnalyticFnCall(true); ((org.apache.doris.catalog.AggregateFunction) fnCall.getFn()).setIsAnalyticFn(true); }) .collect(Collectors.toList()); AnalyticWindow analyticWindow = physicalWindow.translateWindowFrame(windowFrame, context); Map<ExprId, SlotRef> bufferedSlotRefForWindow = getBufferedSlotRefForWindow(windowFrameGroup, context); TupleDescriptor bufferedTupleDesc = context.getBufferedTupleForWindow(); Expr partitionExprsIsNullableMatched = partitionExprs.isEmpty() ? null : windowExprsHaveMatchedNullable( partitionKeyList, partitionExprs, bufferedSlotRefForWindow); Expr orderElementsIsNullableMatched = orderByElements.isEmpty() ? 
null : windowExprsHaveMatchedNullable( orderKeyList.stream().map(UnaryNode::child).collect(Collectors.toList()), orderByElements.stream().map(OrderByElement::getExpr).collect(Collectors.toList()), bufferedSlotRefForWindow); List<Slot> windowSlotList = windowFunctionList.stream() .map(NamedExpression::toSlot) .collect(Collectors.toList()); TupleDescriptor outputTupleDesc = generateTupleDesc(windowSlotList, null, context); AnalyticEvalNode analyticEvalNode = new AnalyticEvalNode( context.nextPlanNodeId(), inputPlanFragment.getPlanRoot(), analyticFnCalls, partitionExprs, orderByElements, analyticWindow, outputTupleDesc, outputTupleDesc, partitionExprsIsNullableMatched, orderElementsIsNullableMatched, bufferedTupleDesc ); analyticEvalNode.setNereidsId(physicalWindow.getId()); analyticEvalNode.setChildrenDistributeExprLists(distributeExprLists); PlanNode root = inputPlanFragment.getPlanRoot(); if (root instanceof SortNode) { ((SortNode) root).setIsAnalyticSort(true); } inputPlanFragment.addPlanRoot(analyticEvalNode); if (findOlapScanNodesByPassExchangeAndJoinNode(inputPlanFragment.getPlanRoot())) { inputPlanFragment.setHasColocatePlanNode(true); analyticEvalNode.setColocate(true); if (root instanceof SortNode) { ((SortNode) root).setColocate(true); } } return inputPlanFragment; } /* ******************************************************************************************** * private functions * ******************************************************************************************** */ private PartitionSortNode translatePartitionSortNode(PhysicalPartitionTopN<? extends Plan> partitionTopN, PlanNode childNode, PlanTranslatorContext context) { List<Expr> partitionExprs = partitionTopN.getPartitionKeys().stream() .map(e -> ExpressionTranslator.translate(e, context)) .collect(Collectors.toList()); TupleDescriptor sortTuple = generateTupleDesc(partitionTopN.child().getOutput(), null, context); List<Expr> orderingExprs = Lists.newArrayList(); List<Boolean> ascOrders = Lists.newArrayList(); List<Boolean> nullsFirstParams = Lists.newArrayList(); List<OrderKey> orderKeys = partitionTopN.getOrderKeys(); orderKeys.forEach(k -> { orderingExprs.add(ExpressionTranslator.translate(k.getExpr(), context)); ascOrders.add(k.isAsc()); nullsFirstParams.add(k.isNullFirst()); }); SortInfo sortInfo = new SortInfo(orderingExprs, ascOrders, nullsFirstParams, sortTuple); PartitionSortNode partitionSortNode = new PartitionSortNode(context.nextPlanNodeId(), childNode, partitionTopN.getFunction(), partitionExprs, sortInfo, partitionTopN.hasGlobalLimit(), partitionTopN.getPartitionLimit(), partitionTopN.getPhase()); partitionSortNode.setNereidsId(partitionTopN.getId()); if (partitionTopN.getStats() != null) { partitionSortNode.setCardinality((long) partitionTopN.getStats().getRowCount()); } updateLegacyPlanIdToPhysicalPlan(partitionSortNode, partitionTopN); return partitionSortNode; } private SortNode translateSortNode(AbstractPhysicalSort<? 
extends Plan> sort, PlanNode childNode, PlanTranslatorContext context) { TupleDescriptor sortTuple = generateTupleDesc(sort.child().getOutput(), null, context); List<Expr> orderingExprs = Lists.newArrayList(); List<Boolean> ascOrders = Lists.newArrayList(); List<Boolean> nullsFirstParams = Lists.newArrayList(); List<OrderKey> orderKeys = sort.getOrderKeys(); orderKeys.forEach(k -> { orderingExprs.add(ExpressionTranslator.translate(k.getExpr(), context)); ascOrders.add(k.isAsc()); nullsFirstParams.add(k.isNullFirst()); }); SortInfo sortInfo = new SortInfo(orderingExprs, ascOrders, nullsFirstParams, sortTuple); SortNode sortNode = new SortNode(context.nextPlanNodeId(), childNode, sortInfo, sort instanceof PhysicalTopN); sortNode.setNereidsId(sort.getId()); if (sort.getStats() != null) { sortNode.setCardinality((long) sort.getStats().getRowCount()); } updateLegacyPlanIdToPhysicalPlan(sortNode, sort); return sortNode; } private void updateScanSlotsMaterialization(ScanNode scanNode, Set<SlotId> requiredSlotIdSet, Set<SlotId> requiredByProjectSlotIdSet, List<SlotId> slotIdsByOrder, PlanTranslatorContext context) { SlotDescriptor smallest = scanNode.getTupleDesc().getSlots().get(0); if (CollectionUtils.isNotEmpty(slotIdsByOrder)) { Map<SlotId, SlotDescriptor> idToSlotDescMap = scanNode.getTupleDesc().getSlots().stream() .filter(s -> requiredSlotIdSet.contains(s.getId())) .collect(Collectors.toMap(SlotDescriptor::getId, s -> s)); scanNode.getTupleDesc().getSlots().clear(); for (SlotId slotId : slotIdsByOrder) { scanNode.getTupleDesc().getSlots().add(idToSlotDescMap.get(slotId)); } } else { scanNode.getTupleDesc().getSlots().removeIf(s -> !requiredSlotIdSet.contains(s.getId())); } if (scanNode.getTupleDesc().getSlots().isEmpty()) { scanNode.getTupleDesc().getSlots().add(smallest); } try { if (context.getSessionVariable() != null && context.getSessionVariable().forbidUnknownColStats && !StatisticConstants.isSystemTable(scanNode.getTupleDesc().getTable())) { for (SlotId slotId : requiredByProjectSlotIdSet) { if (context.isColumnStatsUnknown(scanNode, slotId)) { String colName = scanNode.getTupleDesc().getSlot(slotId.asInt()).getColumn().getName(); throw new AnalysisException("meet unknown column stats: " + colName); } } context.removeScanFromStatsUnknownColumnsMap(scanNode); } scanNode.updateRequiredSlots(context, requiredByProjectSlotIdSet); } catch (UserException e) { Util.logAndThrowRuntimeException(LOG, "User Exception while reset external file scan node contexts.", e); } } private void addConjunctsToPlanNode(PhysicalFilter<? 
extends Plan> filter, PlanNode planNode, PlanTranslatorContext context) { filter.getConjuncts().stream() .map(e -> ExpressionTranslator.translate(e, context)) .forEach(planNode::addConjunct); updateLegacyPlanIdToPhysicalPlan(planNode, filter); } private TupleDescriptor generateTupleDesc(List<Slot> slotList, TableIf table, PlanTranslatorContext context) { TupleDescriptor tupleDescriptor = context.generateTupleDesc(); tupleDescriptor.setTable(table); for (Slot slot : slotList) { context.createSlotDesc(tupleDescriptor, (SlotReference) slot, table); } return tupleDescriptor; } private PlanFragment connectJoinNode(HashJoinNode hashJoinNode, PlanFragment leftFragment, PlanFragment rightFragment, PlanTranslatorContext context, AbstractPlan join) { hashJoinNode.setChild(0, leftFragment.getPlanRoot()); hashJoinNode.setChild(1, rightFragment.getPlanRoot()); setPlanRoot(leftFragment, hashJoinNode, join); context.mergePlanFragment(rightFragment, leftFragment); for (PlanFragment rightChild : rightFragment.getChildren()) { leftFragment.addChild(rightChild); } return leftFragment; } private List<SlotReference> collectGroupBySlots(List<Expression> groupByExpressions, List<NamedExpression> outputExpressions) { List<SlotReference> groupSlots = Lists.newArrayList(); Set<VirtualSlotReference> virtualSlotReferences = groupByExpressions.stream() .filter(VirtualSlotReference.class::isInstance) .map(VirtualSlotReference.class::cast) .collect(Collectors.toSet()); for (Expression e : groupByExpressions) { if (e instanceof SlotReference && outputExpressions.stream().anyMatch(o -> o.anyMatch(e::equals))) { groupSlots.add((SlotReference) e); } else if (e instanceof SlotReference && !virtualSlotReferences.isEmpty()) { groupSlots.add((SlotReference) e); } else { groupSlots.add(new SlotReference(e.toSql(), e.getDataType(), e.nullable(), ImmutableList.of())); } } return groupSlots; } private List<Integer> getSlotIds(TupleDescriptor tupleDescriptor) { return tupleDescriptor.getSlots() .stream() .map(slot -> slot.getId().asInt()) .collect(ImmutableList.toImmutableList()); } private Map<ExprId, SlotRef> getBufferedSlotRefForWindow(WindowFrameGroup windowFrameGroup, PlanTranslatorContext context) { Map<ExprId, SlotRef> bufferedSlotRefForWindow = context.getBufferedSlotRefForWindow(); windowFrameGroup.getPartitionKeys().stream() .map(NamedExpression.class::cast) .forEach(expression -> { ExprId exprId = expression.getExprId(); bufferedSlotRefForWindow.putIfAbsent(exprId, context.findSlotRef(exprId)); }); windowFrameGroup.getOrderKeys().stream() .map(UnaryNode::child) .map(NamedExpression.class::cast) .forEach(expression -> { ExprId exprId = expression.getExprId(); bufferedSlotRefForWindow.putIfAbsent(exprId, context.findSlotRef(exprId)); }); return bufferedSlotRefForWindow; } private Expr windowExprsHaveMatchedNullable(List<Expression> expressions, List<Expr> exprs, Map<ExprId, SlotRef> bufferedSlotRef) { Map<ExprId, Expr> exprIdToExpr = Maps.newHashMap(); for (int i = 0; i < expressions.size(); i++) { NamedExpression expression = (NamedExpression) expressions.get(i); exprIdToExpr.put(expression.getExprId(), exprs.get(i)); } return windowExprsHaveMatchedNullable(exprIdToExpr, bufferedSlotRef, expressions, 0, expressions.size()); } private Expr windowExprsHaveMatchedNullable(Map<ExprId, Expr> exprIdToExpr, Map<ExprId, SlotRef> exprIdToSlotRef, List<Expression> expressions, int i, int size) { if (i > size - 1) { return new BoolLiteral(true); } ExprId exprId = ((NamedExpression) expressions.get(i)).getExprId(); Expr lhs = 
exprIdToExpr.get(exprId); Expr rhs = exprIdToSlotRef.get(exprId); Expr bothNull = new CompoundPredicate(CompoundPredicate.Operator.AND, new IsNullPredicate(lhs, false, true), new IsNullPredicate(rhs, false, true)); Expr lhsEqRhsNotNull = new CompoundPredicate(CompoundPredicate.Operator.AND, new CompoundPredicate(CompoundPredicate.Operator.AND, new IsNullPredicate(lhs, true, true), new IsNullPredicate(rhs, true, true)), new BinaryPredicate(BinaryPredicate.Operator.EQ, lhs, rhs, Type.BOOLEAN, NullableMode.DEPEND_ON_ARGUMENT)); Expr remainder = windowExprsHaveMatchedNullable(exprIdToExpr, exprIdToSlotRef, expressions, i + 1, size); return new CompoundPredicate(CompoundPredicate.Operator.AND, new CompoundPredicate(CompoundPredicate.Operator.OR, bothNull, lhsEqRhsNotNull), remainder); } private PlanFragment createPlanFragment(PlanNode planNode, DataPartition dataPartition, AbstractPlan physicalPlan) { PlanFragment planFragment = new PlanFragment(context.nextFragmentId(), planNode, dataPartition); updateLegacyPlanIdToPhysicalPlan(planNode, physicalPlan); return planFragment; } private void setPlanRoot(PlanFragment fragment, PlanNode planNode, AbstractPlan physicalPlan) { fragment.setPlanRoot(planNode); updateLegacyPlanIdToPhysicalPlan(planNode, physicalPlan); } private void addPlanRoot(PlanFragment fragment, PlanNode planNode, AbstractPlan physicalPlan) { fragment.addPlanRoot(planNode); updateLegacyPlanIdToPhysicalPlan(planNode, physicalPlan); } private DataPartition toDataPartition(DistributionSpec distributionSpec, List<ExprId> childOutputIds, PlanTranslatorContext context) { if (distributionSpec instanceof DistributionSpecAny || distributionSpec instanceof DistributionSpecStorageAny || distributionSpec instanceof DistributionSpecExecutionAny) { return DataPartition.RANDOM; } else if (distributionSpec instanceof DistributionSpecGather || distributionSpec instanceof DistributionSpecStorageGather || distributionSpec instanceof DistributionSpecReplicated) { return DataPartition.UNPARTITIONED; } else if (distributionSpec instanceof DistributionSpecHash) { DistributionSpecHash distributionSpecHash = (DistributionSpecHash) distributionSpec; List<Expr> partitionExprs = Lists.newArrayList(); for (int i = 0; i < distributionSpecHash.getEquivalenceExprIds().size(); i++) { Set<ExprId> equivalenceExprId = distributionSpecHash.getEquivalenceExprIds().get(i); for (ExprId exprId : equivalenceExprId) { if (childOutputIds.contains(exprId)) { partitionExprs.add(context.findSlotRef(exprId)); break; } } if (partitionExprs.size() != i + 1) { throw new RuntimeException("Cannot translate DistributionSpec to DataPartition," + " DistributionSpec: " + distributionSpec + ", child output: " + childOutputIds); } } TPartitionType partitionType; switch (distributionSpecHash.getShuffleType()) { case STORAGE_BUCKETED: partitionType = TPartitionType.BUCKET_SHFFULE_HASH_PARTITIONED; break; case EXECUTION_BUCKETED: partitionType = TPartitionType.HASH_PARTITIONED; break; case NATURAL: default: throw new RuntimeException("Do not support shuffle type: " + distributionSpecHash.getShuffleType()); } return new DataPartition(partitionType, partitionExprs); } else if (distributionSpec instanceof DistributionSpecTabletIdShuffle) { return DataPartition.TABLET_ID; } else if (distributionSpec instanceof DistributionSpecTableSinkHashPartitioned) { DistributionSpecTableSinkHashPartitioned partitionSpecHash = (DistributionSpecTableSinkHashPartitioned) distributionSpec; List<Expr> partitionExprs = Lists.newArrayList(); List<ExprId> 
partitionExprIds = partitionSpecHash.getOutputColExprIds(); for (ExprId partitionExprId : partitionExprIds) { if (childOutputIds.contains(partitionExprId)) { partitionExprs.add(context.findSlotRef(partitionExprId)); } } return new DataPartition(TPartitionType.TABLE_SINK_HASH_PARTITIONED, partitionExprs); } else if (distributionSpec instanceof DistributionSpecTableSinkRandomPartitioned) { return new DataPartition(TPartitionType.TABLE_SINK_RANDOM_PARTITIONED); } else { throw new RuntimeException("Unknown DistributionSpec: " + distributionSpec); } } private void updateLegacyPlanIdToPhysicalPlan(PlanNode planNode, AbstractPlan physicalPlan) { if (statsErrorEstimator != null) { statsErrorEstimator.updateLegacyPlanIdToPhysicalPlan(planNode, physicalPlan); } } private void injectRowIdColumnSlot(TupleDescriptor tupleDesc) { SlotDescriptor slotDesc = context.addSlotDesc(tupleDesc); if (LOG.isDebugEnabled()) { LOG.debug("inject slot {}", slotDesc); } String name = Column.ROWID_COL; Column col = new Column(name, Type.STRING, false, null, false, "", "rowid column"); slotDesc.setType(Type.STRING); slotDesc.setColumn(col); slotDesc.setIsNullable(false); slotDesc.setIsMaterialized(true); } /** * topN opt: using storage data ordering to accelerate topn operation. * refer pr: optimize topn query if order by columns is prefix of sort keys of table ( */ private boolean checkPushSort(SortNode sortNode, OlapTable olapTable) { if (sortNode.getLimit() <= 0 || sortNode.getLimit() > context.getSessionVariable().topnOptLimitThreshold) { return false; } if (sortNode.getSortInfo().getIsAscOrder().stream().distinct().count() != 1 || olapTable.isZOrderSort()) { return false; } List<Expr> sortExprs = sortNode.getSortInfo().getOrderingExprs(); List<Boolean> nullsFirsts = sortNode.getSortInfo().getNullsFirst(); List<Boolean> isAscOrders = sortNode.getSortInfo().getIsAscOrder(); if (sortExprs.size() > olapTable.getDataSortInfo().getColNum()) { return false; } List<Column> sortKeyColumns = olapTable.getFullSchema(); if (olapTable.getEnableUniqueKeyMergeOnWrite()) { Map<Integer, Column> clusterKeyMap = new TreeMap<>(); for (Column column : olapTable.getFullSchema()) { if (column.getClusterKeyId() != -1) { clusterKeyMap.put(column.getClusterKeyId(), column); } } if (!clusterKeyMap.isEmpty()) { sortKeyColumns.clear(); sortKeyColumns.addAll(clusterKeyMap.values()); } } for (int i = 0; i < sortExprs.size(); i++) { Column sortColumn = sortKeyColumns.get(i); Expr sortExpr = sortExprs.get(i); if (sortExpr instanceof SlotRef) { SlotRef slotRef = (SlotRef) sortExpr; if (sortColumn.equals(slotRef.getColumn())) { if (sortColumn.isAllowNull() && nullsFirsts.get(i) && !isAscOrders.get(i)) { return false; } } else { return false; } } else { return false; } } return true; } private List<Expr> translateToLegacyConjuncts(Set<Expression> conjuncts) { List<Expr> outputExprs = Lists.newArrayList(); if (conjuncts != null) { conjuncts.stream() .map(e -> ExpressionTranslator.translate(e, context)) .forEach(outputExprs::add); } return outputExprs; } private boolean isComplexDataType(DataType dataType) { return dataType instanceof ArrayType || dataType instanceof MapType || dataType instanceof JsonType || dataType instanceof StructType; } private PhysicalCTEConsumer getCTEConsumerChild(PhysicalPlan root) { if (root == null) { return null; } else if (root instanceof PhysicalCTEConsumer) { return (PhysicalCTEConsumer) root; } else if (root.children().size() != 1) { return null; } else { return getCTEConsumerChild((PhysicalPlan) root.child(0)); } } 
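// Editorial note: the helper below is a colocation probe; it reports an OlapScanNode only when one is reachable without crossing a join or an exchange, i.e. while the data still sits in its original bucket distribution.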
private boolean findOlapScanNodesByPassExchangeAndJoinNode(PlanNode root) { if (root instanceof OlapScanNode) { return true; } else if (!(root instanceof JoinNodeBase || root instanceof ExchangeNode)) { return root.getChildren().stream().anyMatch(child -> findOlapScanNodesByPassExchangeAndJoinNode(child)); } return false; } private List<List<Expr>> getDistributeExprs(Plan ... children) { List<List<Expr>> distributeExprLists = Lists.newArrayList(); for (Plan child : children) { DistributionSpec spec = ((PhysicalPlan) child).getPhysicalProperties().getDistributionSpec(); distributeExprLists.add(getDistributeExpr(child.getOutputExprIds(), spec)); } return distributeExprLists; } private List<Expr> getDistributeExpr(List<ExprId> childOutputIds, DistributionSpec spec) { if (spec instanceof DistributionSpecHash) { DistributionSpecHash distributionSpecHash = (DistributionSpecHash) spec; List<Expr> partitionExprs = Lists.newArrayList(); for (int i = 0; i < distributionSpecHash.getEquivalenceExprIds().size(); i++) { Set<ExprId> equivalenceExprId = distributionSpecHash.getEquivalenceExprIds().get(i); for (ExprId exprId : equivalenceExprId) { if (childOutputIds.contains(exprId)) { partitionExprs.add(context.findSlotRef(exprId)); break; } } } return partitionExprs; } return Lists.newArrayList(); } }
Why have we commented out these lines?
public void process(ServiceNode serviceNode, List<AnnotationAttachmentNode> annotations) { List<BLangFunction> resources = (List<BLangFunction>) serviceNode.getResources(); resources.forEach(res -> validate(serviceNode.getName().getValue(), res, this.diagnosticLog)); }
public void process(ServiceNode serviceNode, List<AnnotationAttachmentNode> annotations) { List<BLangFunction> resources = (List<BLangFunction>) serviceNode.getResources(); resources.forEach(res -> validate(serviceNode.getName().getValue(), res, this.diagnosticLog)); }
class SocketCompilerPlugin extends AbstractCompilerPlugin { private static final String INVALID_RESOURCE_SIGNATURE = "Invalid resource signature for %s in service %s. "; private DiagnosticLog diagnosticLog = null; private int resourceCount = 0; @Override public void init(DiagnosticLog diagnosticLog) { this.diagnosticLog = diagnosticLog; } @Override private void validate(String serviceName, BLangFunction resource, DiagnosticLog diagnosticLog) { switch (resource.getName().getValue()) { case RESOURCE_ON_CONNECT: case RESOURCE_ON_ACCEPT: validateOnAccept(serviceName, resource, diagnosticLog); resourceCount++; break; case RESOURCE_ON_READ_READY: validateOnReadReady(serviceName, resource, diagnosticLog); resourceCount++; break; case RESOURCE_ON_CLOSE: validateOnClose(serviceName, resource, diagnosticLog); resourceCount++; break; case RESOURCE_ON_ERROR: validateOnError(serviceName, resource, diagnosticLog); resourceCount++; break; default: } } private void validateOnError(String serviceName, BLangFunction resource, DiagnosticLog diagnosticLog) { final List<BLangSimpleVariable> readReadyParams = resource.getParameters(); if (readReadyParams.size() != 2) { String msg = String .format(INVALID_RESOURCE_SIGNATURE + "Parameters should be a 'socket:Caller' and 'error'", resource.getName().getValue(), serviceName); diagnosticLog.logDiagnostic(ERROR, resource.getPosition(), msg); return; } BType caller = readReadyParams.get(0).type; if (OBJECT.equals(caller.getKind()) && caller instanceof BStructureType) { validateEndpointCaller(serviceName, resource, diagnosticLog, (BStructureType) caller); } BType error = readReadyParams.get(1).getTypeNode().type; if (RECORD.equals(error.getKind()) && error instanceof BRecordType) { if (!"error".equals(error.tsymbol.toString())) { String msg = String.format(INVALID_RESOURCE_SIGNATURE + "The second parameter should be an 'error'", resource.getName().getValue(), serviceName); diagnosticLog.logDiagnostic(ERROR, resource.getPosition(), msg); } } } private void validateOnReadReady(String serviceName, BLangFunction resource, DiagnosticLog diagnosticLog) { final List<BLangSimpleVariable> readReadyParams = resource.getParameters(); if (readReadyParams.size() != 2) { String msg = String .format(INVALID_RESOURCE_SIGNATURE + "Parameters should be a 'socket:Caller' and 'byte[]'", resource.getName().getValue(), serviceName); diagnosticLog.logDiagnostic(ERROR, resource.getPosition(), msg); return; } BType caller = readReadyParams.get(0).type; if (OBJECT.equals(caller.getKind()) && caller instanceof BStructureType) { validateEndpointCaller(serviceName, resource, diagnosticLog, (BStructureType) caller); } BType content = readReadyParams.get(1).getTypeNode().type; if (ARRAY.equals(content.getKind()) && content instanceof BArrayType) { if (!"byte".equals(((BArrayType) content).eType.tsymbol.toString())) { String msg = String.format(INVALID_RESOURCE_SIGNATURE + "Second parameter should be a byte[]", resource.getName().getValue(), serviceName); diagnosticLog.logDiagnostic(ERROR, resource.getPosition(), msg); } } } private void validateOnClose(String serviceName, BLangFunction resource, DiagnosticLog diagnosticLog) { validateOnAccept(serviceName, resource, diagnosticLog); } private void validateOnAccept(String serviceName, BLangFunction resource, DiagnosticLog diagnosticLog) { final List<BLangSimpleVariable> acceptParams = resource.getParameters(); if (acceptParams.size() != 1) { String msg = String.format(INVALID_RESOURCE_SIGNATURE + "The parameter should be a 'socket:Caller'", 
resource.getName().getValue(), serviceName); diagnosticLog.logDiagnostic(ERROR, resource.getPosition(), msg); return; } BType caller = acceptParams.get(0).type; if (OBJECT.equals(caller.getKind()) && caller instanceof BStructureType) { validateEndpointCaller(serviceName, resource, diagnosticLog, (BStructureType) caller); } } private void validateEndpointCaller(String serviceName, BLangFunction resource, DiagnosticLog diagnosticLog, BStructureType event) { String eventType = event.tsymbol.toString(); if (!("ballerina/socket:Listener".equals(eventType) || "ballerina/socket:Client".equals(eventType))) { String msg = String.format(INVALID_RESOURCE_SIGNATURE + "The parameter should be a 'socket:Caller'", resource.getName().getValue(), serviceName); diagnosticLog.logDiagnostic(ERROR, resource.getPosition(), msg); } } }
class SocketCompilerPlugin extends AbstractCompilerPlugin { private static final String INVALID_RESOURCE_SIGNATURE = "Invalid resource signature for %s in service %s. "; private DiagnosticLog diagnosticLog = null; private int resourceCount = 0; @Override public void init(DiagnosticLog diagnosticLog) { this.diagnosticLog = diagnosticLog; } @Override private void validate(String serviceName, BLangFunction resource, DiagnosticLog diagnosticLog) { switch (resource.getName().getValue()) { case RESOURCE_ON_CONNECT: case RESOURCE_ON_ACCEPT: validateOnAccept(serviceName, resource, diagnosticLog); resourceCount++; break; case RESOURCE_ON_READ_READY: validateOnReadReady(serviceName, resource, diagnosticLog); resourceCount++; break; case RESOURCE_ON_CLOSE: validateOnClose(serviceName, resource, diagnosticLog); resourceCount++; break; case RESOURCE_ON_ERROR: validateOnError(serviceName, resource, diagnosticLog); resourceCount++; break; default: } } private void validateOnError(String serviceName, BLangFunction resource, DiagnosticLog diagnosticLog) { final List<BLangSimpleVariable> readReadyParams = resource.getParameters(); if (readReadyParams.size() != 2) { String msg = String .format(INVALID_RESOURCE_SIGNATURE + "Parameters should be a 'socket:Caller' and 'error'", resource.getName().getValue(), serviceName); diagnosticLog.logDiagnostic(ERROR, resource.getPosition(), msg); return; } BType caller = readReadyParams.get(0).type; if (OBJECT.equals(caller.getKind()) && caller instanceof BStructureType) { validateEndpointCaller(serviceName, resource, diagnosticLog, (BStructureType) caller); } BType error = readReadyParams.get(1).getTypeNode().type; if (RECORD.equals(error.getKind()) && error instanceof BRecordType) { if (!"error".equals(error.tsymbol.toString())) { String msg = String.format(INVALID_RESOURCE_SIGNATURE + "The second parameter should be an 'error'", resource.getName().getValue(), serviceName); diagnosticLog.logDiagnostic(ERROR, resource.getPosition(), msg); } } } private void validateOnReadReady(String serviceName, BLangFunction resource, DiagnosticLog diagnosticLog) { final List<BLangSimpleVariable> readReadyParams = resource.getParameters(); if (readReadyParams.size() != 2) { String msg = String .format(INVALID_RESOURCE_SIGNATURE + "Parameters should be a 'socket:Caller' and 'byte[]'", resource.getName().getValue(), serviceName); diagnosticLog.logDiagnostic(ERROR, resource.getPosition(), msg); return; } BType caller = readReadyParams.get(0).type; if (OBJECT.equals(caller.getKind()) && caller instanceof BStructureType) { validateEndpointCaller(serviceName, resource, diagnosticLog, (BStructureType) caller); } BType content = readReadyParams.get(1).getTypeNode().type; if (ARRAY.equals(content.getKind()) && content instanceof BArrayType) { if (!"byte".equals(((BArrayType) content).eType.tsymbol.toString())) { String msg = String.format(INVALID_RESOURCE_SIGNATURE + "Second parameter should be a byte[]", resource.getName().getValue(), serviceName); diagnosticLog.logDiagnostic(ERROR, resource.getPosition(), msg); } } } private void validateOnClose(String serviceName, BLangFunction resource, DiagnosticLog diagnosticLog) { validateOnAccept(serviceName, resource, diagnosticLog); } private void validateOnAccept(String serviceName, BLangFunction resource, DiagnosticLog diagnosticLog) { final List<BLangSimpleVariable> acceptParams = resource.getParameters(); if (acceptParams.size() != 1) { String msg = String.format(INVALID_RESOURCE_SIGNATURE + "The parameter should be a 'socket:Caller'", 
resource.getName().getValue(), serviceName); diagnosticLog.logDiagnostic(ERROR, resource.getPosition(), msg); return; } BType caller = acceptParams.get(0).type; if (OBJECT.equals(caller.getKind()) && caller instanceof BStructureType) { validateEndpointCaller(serviceName, resource, diagnosticLog, (BStructureType) caller); } } private void validateEndpointCaller(String serviceName, BLangFunction resource, DiagnosticLog diagnosticLog, BStructureType event) { String eventType = event.tsymbol.toString(); if (!("ballerina/socket:Listener".equals(eventType) || "ballerina/socket:Client".equals(eventType))) { String msg = String.format(INVALID_RESOURCE_SIGNATURE + "The parameter should be a 'socket:Caller'", resource.getName().getValue(), serviceName); diagnosticLog.logDiagnostic(ERROR, resource.getPosition(), msg); } } }
WDYM? The class now *potentially* holds two annotation targets. So we first try to create the String representation with the method parameter AnnotationTarget and, if it's not present, we fall back to the original target.
public String getTargetInfo() { if (target == null) { return ""; } if (methodParameterTarget != null && Kind.METHOD_PARAMETER.equals(methodParameterTarget.kind())) { String method = methodParameterTarget.asMethodParameter().method().name(); if (method.equals(Methods.INIT)) { method = " constructor"; } else { method = "#" + method + "()"; } return "parameter '" + methodParameterTarget.asMethodParameter().name() + "' of " + methodParameterTarget.asMethodParameter().method().declaringClass().name() + method; } switch (target.kind()) { case FIELD: return target.asField().declaringClass().name() + "#" + target.asField().name(); case METHOD: String param = target.asMethod().parameterName(position); if (param == null || param.isBlank()) { param = "arg" + position; } String method = target.asMethod().name(); if (method.equals(Methods.INIT)) { method = " constructor"; } else { method = "#" + method + "()"; } return "parameter '" + param + "' of " + target.asMethod().declaringClass().name() + method; default: return target.toString(); } }
}
public String getTargetInfo() { if (target == null) { return ""; } switch (target.kind()) { case FIELD: return target.asField().declaringClass().name() + "#" + target.asField().name(); case METHOD: String param = target.asMethod().parameterName(position); if (param == null || param.isBlank()) { param = "arg" + position; } String method = target.asMethod().name(); if (method.equals(Methods.INIT)) { method = " constructor"; } else { method = "#" + method + "()"; } return "parameter '" + param + "' of " + target.asMethod().declaringClass().name() + method; case METHOD_PARAMETER: String name = methodParameterTarget.asMethodParameter().method().name(); if (name.equals(Methods.INIT)) { name = " constructor"; } else { name = "#" + name + "()"; } return "parameter '" + methodParameterTarget.asMethodParameter().name() + "' of " + methodParameterTarget.asMethodParameter().method().declaringClass().name() + name; default: return target.toString(); } }
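A minimal sketch of the prefer-then-fall-back selection the comment describes, using hypothetical names and assuming Jandex's AnnotationTarget API; the real getTargetInfo() builds a diagnostic string rather than returning a target.

import org.jboss.jandex.AnnotationTarget;
import org.jboss.jandex.AnnotationTarget.Kind;

final class TargetSelectionSketch {
    // Prefer the precise method-parameter target when it exists; otherwise
    // fall back to the original target (the whole method or field).
    static AnnotationTarget effectiveTarget(AnnotationTarget target, AnnotationTarget methodParameterTarget) {
        if (methodParameterTarget != null && methodParameterTarget.kind() == Kind.METHOD_PARAMETER) {
            return methodParameterTarget;
        }
        return target;
    }
}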
class InjectionPointInfo { private static boolean isNamedWithoutValue(AnnotationInstance annotation) { if (annotation.name().equals(DotNames.NAMED)) { AnnotationValue name = annotation.value(); return name == null || name.asString().isEmpty(); } return false; } static InjectionPointInfo fromField(FieldInfo field, ClassInfo beanClass, BeanDeployment beanDeployment, InjectionPointModifier transformer) { Set<AnnotationInstance> qualifiers = new HashSet<>(); Collection<AnnotationInstance> annotations = beanDeployment.getAnnotations(field); for (AnnotationInstance annotation : annotations) { for (AnnotationInstance annotationInstance : beanDeployment.extractQualifiers(annotation)) { if (isNamedWithoutValue(annotationInstance)) { annotationInstance = AnnotationInstance.builder(annotationInstance.name()) .value(field.name()) .buildWithTarget(annotationInstance.target()); } qualifiers.add(annotationInstance); } } Type type = resolveType(field.type(), beanClass, field.declaringClass(), beanDeployment); return new InjectionPointInfo(type, transformer.applyTransformers(type, field, qualifiers), field, null, contains(annotations, DotNames.TRANSIENT_REFERENCE), contains(annotations, DotNames.DELEGATE)); } static InjectionPointInfo fromResourceField(FieldInfo field, ClassInfo beanClass, BeanDeployment beanDeployment, InjectionPointModifier transformer) { Type type = resolveType(field.type(), beanClass, field.declaringClass(), beanDeployment); return new InjectionPointInfo(type, transformer.applyTransformers(type, field, new HashSet<>(Annotations.onlyRuntimeVisible(field.annotations()))), InjectionPointKind.RESOURCE, field, null, false, false); } static List<InjectionPointInfo> fromMethod(MethodInfo method, ClassInfo beanClass, BeanDeployment beanDeployment, InjectionPointModifier transformer) { return fromMethod(method, beanClass, beanDeployment, null, transformer); } static List<InjectionPointInfo> fromMethod(MethodInfo method, ClassInfo beanClass, BeanDeployment beanDeployment, BiPredicate<Set<AnnotationInstance>, Integer> skipPredicate, InjectionPointModifier transformer) { List<InjectionPointInfo> injectionPoints = new ArrayList<>(); for (ListIterator<Type> iterator = method.parameterTypes().listIterator(); iterator.hasNext();) { Type paramType = iterator.next(); int position = iterator.previousIndex(); Set<AnnotationInstance> paramAnnotations = Annotations.getParameterAnnotations(beanDeployment, method, position); if (skipPredicate != null && skipPredicate.test(paramAnnotations, position)) { continue; } Set<AnnotationInstance> paramQualifiers = new HashSet<>(); for (AnnotationInstance paramAnnotation : paramAnnotations) { for (AnnotationInstance annotationInstance : beanDeployment.extractQualifiers(paramAnnotation)) { if (isNamedWithoutValue(annotationInstance)) { throw new DefinitionException("@Named without value may not be used on method parameter: " + method); } paramQualifiers.add(annotationInstance); } } Type type = resolveType(paramType, beanClass, method.declaringClass(), beanDeployment); injectionPoints.add(new InjectionPointInfo(type, transformer.applyTransformers(type, method, method.parameters().get(position), paramQualifiers), method, method.parameters().get(position), contains(paramAnnotations, DotNames.TRANSIENT_REFERENCE), contains(paramAnnotations, DotNames.DELEGATE))); } return injectionPoints; } static InjectionPointInfo fromSyntheticInjectionPoint(TypeAndQualifiers typeAndQualifiers) { return new InjectionPointInfo(typeAndQualifiers, InjectionPointKind.CDI, null, null, false, 
false); } private final TypeAndQualifiers typeAndQualifiers; private final AtomicReference<BeanInfo> resolvedBean; private final AtomicReference<BeanInfo> targetBean; private final InjectionPointKind kind; private final boolean hasDefaultQualifier; private final AnnotationTarget target; private final AnnotationTarget methodParameterTarget; private final int position; private final boolean isTransientReference; private final boolean isDelegate; InjectionPointInfo(Type requiredType, Set<AnnotationInstance> requiredQualifiers, AnnotationTarget target, AnnotationTarget methodParameterTarget, boolean isTransientReference, boolean isDelegate) { this(requiredType, requiredQualifiers, InjectionPointKind.CDI, target, methodParameterTarget, isTransientReference, isDelegate); } InjectionPointInfo(Type requiredType, Set<AnnotationInstance> requiredQualifiers, InjectionPointKind kind, AnnotationTarget target, AnnotationTarget methodParameterTarget, boolean isTransientReference, boolean isDelegate) { this(new TypeAndQualifiers(requiredType, requiredQualifiers.isEmpty() ? Collections.singleton(AnnotationInstance.create(DotNames.DEFAULT, null, Collections.emptyList())) : requiredQualifiers), kind, target, methodParameterTarget, isTransientReference, isDelegate); } InjectionPointInfo(TypeAndQualifiers typeAndQualifiers, InjectionPointKind kind, AnnotationTarget target, AnnotationTarget methodParameterTarget, boolean isTransientReference, boolean isDelegate) { this.typeAndQualifiers = typeAndQualifiers; this.resolvedBean = new AtomicReference<BeanInfo>(null); this.targetBean = new AtomicReference<BeanInfo>(null); this.kind = kind; this.hasDefaultQualifier = typeAndQualifiers.qualifiers.size() == 1 && typeAndQualifiers.qualifiers.iterator().next().name().equals(DotNames.DEFAULT); this.target = target; this.methodParameterTarget = methodParameterTarget; this.position = (methodParameterTarget == null || !Kind.METHOD_PARAMETER.equals(methodParameterTarget.kind())) ? -1 : methodParameterTarget.asMethodParameter().position(); this.isTransientReference = isTransientReference; this.isDelegate = isDelegate; if (DotNames.EVENT.equals(typeAndQualifiers.type.name()) && typeAndQualifiers.type.kind() == Type.Kind.CLASS) { throw new DefinitionException( "Event injection point can never be raw type - please specify the type parameter. Injection point: " + target); } } void resolve(BeanInfo bean) { resolvedBean.set(bean); } public BeanInfo getResolvedBean() { return resolvedBean.get(); } public Optional<BeanInfo> getTargetBean() { return Optional.ofNullable(targetBean.get()); } public void setTargetBean(BeanInfo bean) { this.targetBean.set(bean); } InjectionPointKind getKind() { return kind; } /** * Note that for programmatic lookup, the required type is the type parameter specified at the injection point. For example, * the required type for an injection point of type {@code Instance<org.acme.Foo>} is {@code org.acme.Foo}. 
* * @return the required type of this injection point */ public Type getRequiredType() { Type requiredType = typeAndQualifiers.type; if (isProgrammaticLookup() && requiredType.kind() == org.jboss.jandex.Type.Kind.PARAMETERIZED_TYPE) { requiredType = requiredType.asParameterizedType().arguments().get(0); } return requiredType; } /** * This method always returns the original type declared on the injection point, unlike {@link * * @return the type specified at the injection point */ public Type getType() { return typeAndQualifiers.type; } /** * @return <code>true</code> if this injection represents a dynamically obtained instance, <code>false</code> otherwise */ public boolean isProgrammaticLookup() { DotName requiredTypeName = typeAndQualifiers.type.name(); return DotNames.INSTANCE.equals(requiredTypeName) || DotNames.INJECTABLE_INSTANCE.equals(requiredTypeName) || DotNames.PROVIDER.equals(requiredTypeName); } public Set<AnnotationInstance> getRequiredQualifiers() { return typeAndQualifiers.qualifiers; } public AnnotationInstance getRequiredQualifier(DotName name) { for (AnnotationInstance qualifier : typeAndQualifiers.qualifiers) { if (qualifier.name().equals(name)) { return qualifier; } } return null; } public boolean hasDefaultedQualifier() { return hasDefaultQualifier; } TypeAndQualifiers getTypeAndQualifiers() { return typeAndQualifiers; } /** * This method is deprecated and will be removed at some point after Quarkus 3.12. * Use {@link * Both methods behave equally except for method parameter injection points where {@code getAnnotationTarget()} * returns method parameter as {@link AnnotationTarget} instead of the whole method. * <p> * For injected params, this method returns the corresponding method and not the param itself. * * @return the annotation target or {@code null} in case of synthetic injection point */ @Deprecated(forRemoval = true, since = "3.12") public AnnotationTarget getTarget() { return target; } /** * Unlike {@link * * @return the annotation target or {@code null} in case of synthetic injection point */ public AnnotationTarget getAnnotationTarget() { return methodParameterTarget == null ? target : methodParameterTarget; } public boolean isField() { return target != null && target.kind() == Kind.FIELD; } public boolean isParam() { return methodParameterTarget != null && methodParameterTarget.kind() == Kind.METHOD_PARAMETER; } public boolean isTransient() { return isField() && Modifier.isTransient(target.asField().flags()); } /** * * @return true if this injection point represents a method parameter annotated with {@code TransientReference} that * resolves to a dependent bean */ boolean isDependentTransientReference() { BeanInfo bean = getResolvedBean(); return bean != null && isParam() && BuiltinScope.DEPENDENT.is(bean.getScope()) && isTransientReference; } public boolean isTransientReference() { return isTransientReference; } public boolean isDelegate() { return isDelegate; } public boolean hasResolvedBean() { return resolvedBean.get() != null; } /** * @return the parameter position or {@code -1} for a field injection point or synthetic injection point */ public int getPosition() { return position; } /** * @return {@code true} if it represents a synthetic injection point, {@code false} otherwise */ public boolean isSynthetic() { return target == null; } /** * If an injection point resolves to a dependent bean that (A) injects the InjectionPoint metadata or (2) is synthetic, then * we need to wrap the injectable reference provider. 
* * @return {@code true} if a wrapper is needed, {@code false} otherwise */ boolean isCurrentInjectionPointWrapperNeeded() { BeanInfo bean = getResolvedBean(); if (bean != null && BuiltinScope.DEPENDENT.is(bean.getScope())) { return bean.isSynthetic() || bean.requiresInjectionPointMetadata(); } return false; } @Override public String toString() { return "InjectionPointInfo [requiredType=" + typeAndQualifiers.type + ", requiredQualifiers=" + typeAndQualifiers.qualifiers + "]"; } private static Type resolveType(Type type, ClassInfo beanClass, ClassInfo declaringClass, BeanDeployment beanDeployment) { if (type.kind() == Type.Kind.PRIMITIVE || type.kind() == Type.Kind.CLASS) { return type; } Map<ClassInfo, Map<String, Type>> resolvedTypeVariables = Types.resolvedTypeVariables(beanClass, beanDeployment); return resolveType(type, declaringClass, beanDeployment, resolvedTypeVariables); } private static Type resolveType(Type type, ClassInfo beanClass, BeanDeployment beanDeployment, Map<ClassInfo, Map<String, Type>> resolvedTypeVariables) { if (type.kind() == org.jboss.jandex.Type.Kind.TYPE_VARIABLE) { if (resolvedTypeVariables.containsKey(beanClass)) { return resolvedTypeVariables.get(beanClass).getOrDefault(type.asTypeVariable().identifier(), type); } } else if (type.kind() == org.jboss.jandex.Type.Kind.PARAMETERIZED_TYPE) { ParameterizedType parameterizedType = type.asParameterizedType(); Type[] typeParams = new Type[parameterizedType.arguments().size()]; for (int i = 0; i < typeParams.length; i++) { Type argument = parameterizedType.arguments().get(i); if (argument.kind() == org.jboss.jandex.Type.Kind.TYPE_VARIABLE || argument.kind() == org.jboss.jandex.Type.Kind.PARAMETERIZED_TYPE) { typeParams[i] = resolveType(argument, beanClass, beanDeployment, resolvedTypeVariables); } else { typeParams[i] = argument; } } return ParameterizedType.create(parameterizedType.name(), typeParams, parameterizedType.owner()); } else if (type.kind() == org.jboss.jandex.Type.Kind.ARRAY) { ArrayType arrayType = type.asArrayType(); Type component = arrayType.constituent(); if (component.kind() == org.jboss.jandex.Type.Kind.TYPE_VARIABLE || component.kind() == org.jboss.jandex.Type.Kind.PARAMETERIZED_TYPE) { component = resolveType(component, beanClass, beanDeployment, resolvedTypeVariables); } return ArrayType.create(component, type.asArrayType().dimensions()); } return type; } enum InjectionPointKind { CDI, RESOURCE } public static class TypeAndQualifiers { public final Type type; public final Set<AnnotationInstance> qualifiers; public TypeAndQualifiers(Type type, Set<AnnotationInstance> qualifiers) { this.type = type; this.qualifiers = qualifiers; } @Override public int hashCode() { final int prime = 31; int result = 1; result = prime * result + annotationSetHashCode(qualifiers); result = prime * result + ((type == null) ? 
0 : type.hashCode()); return result; } @Override public boolean equals(Object obj) { if (this == obj) { return true; } if (obj == null) { return false; } if (getClass() != obj.getClass()) { return false; } TypeAndQualifiers other = (TypeAndQualifiers) obj; if (qualifiers == null) { if (other.qualifiers != null) { return false; } } else if (!annotationSetEquals(qualifiers, other.qualifiers)) { return false; } if (type == null) { if (other.type != null) { return false; } } else if (!type.equals(other.type)) { return false; } return true; } private static boolean annotationSetEquals(Set<AnnotationInstance> s1, Set<AnnotationInstance> s2) { if (s1 == s2) { return true; } if (s1.size() != s2.size()) { return false; } for (AnnotationInstance a1 : s1) { for (AnnotationInstance a2 : s2) { if (!annotationEquals(a1, a2)) { return false; } } } return true; } private static boolean annotationEquals(AnnotationInstance a1, AnnotationInstance a2) { if (a1 == a2) { return true; } return a1.name().equals(a2.name()) && a1.values().equals(a2.values()); } private static int annotationSetHashCode(Set<AnnotationInstance> s) { int result = 1; for (AnnotationInstance a : s) { result = 31 * result + annotationHashCode(a); } return result; } private static int annotationHashCode(AnnotationInstance a) { int result = a.name().hashCode(); result = 31 * result + a.values().hashCode(); return result; } } }
class InjectionPointInfo { private static boolean isNamedWithoutValue(AnnotationInstance annotation) { if (annotation.name().equals(DotNames.NAMED)) { AnnotationValue name = annotation.value(); return name == null || name.asString().isEmpty(); } return false; } static InjectionPointInfo fromField(FieldInfo field, ClassInfo beanClass, BeanDeployment beanDeployment, InjectionPointModifier transformer) { Set<AnnotationInstance> qualifiers = new HashSet<>(); Collection<AnnotationInstance> annotations = beanDeployment.getAnnotations(field); for (AnnotationInstance annotation : annotations) { for (AnnotationInstance annotationInstance : beanDeployment.extractQualifiers(annotation)) { if (isNamedWithoutValue(annotationInstance)) { annotationInstance = AnnotationInstance.builder(annotationInstance.name()) .value(field.name()) .buildWithTarget(annotationInstance.target()); } qualifiers.add(annotationInstance); } } Type type = resolveType(field.type(), beanClass, field.declaringClass(), beanDeployment); return new InjectionPointInfo(type, transformer.applyTransformers(type, field, qualifiers), field, null, contains(annotations, DotNames.TRANSIENT_REFERENCE), contains(annotations, DotNames.DELEGATE)); } static InjectionPointInfo fromResourceField(FieldInfo field, ClassInfo beanClass, BeanDeployment beanDeployment, InjectionPointModifier transformer) { Type type = resolveType(field.type(), beanClass, field.declaringClass(), beanDeployment); return new InjectionPointInfo(type, transformer.applyTransformers(type, field, new HashSet<>(Annotations.onlyRuntimeVisible(field.annotations()))), InjectionPointKind.RESOURCE, field, null, false, false); } static List<InjectionPointInfo> fromMethod(MethodInfo method, ClassInfo beanClass, BeanDeployment beanDeployment, InjectionPointModifier transformer) { return fromMethod(method, beanClass, beanDeployment, null, transformer); } static List<InjectionPointInfo> fromMethod(MethodInfo method, ClassInfo beanClass, BeanDeployment beanDeployment, BiPredicate<Set<AnnotationInstance>, Integer> skipPredicate, InjectionPointModifier transformer) { List<InjectionPointInfo> injectionPoints = new ArrayList<>(); for (ListIterator<Type> iterator = method.parameterTypes().listIterator(); iterator.hasNext();) { Type paramType = iterator.next(); int position = iterator.previousIndex(); Set<AnnotationInstance> paramAnnotations = Annotations.getParameterAnnotations(beanDeployment, method, position); if (skipPredicate != null && skipPredicate.test(paramAnnotations, position)) { continue; } Set<AnnotationInstance> paramQualifiers = new HashSet<>(); for (AnnotationInstance paramAnnotation : paramAnnotations) { for (AnnotationInstance annotationInstance : beanDeployment.extractQualifiers(paramAnnotation)) { if (isNamedWithoutValue(annotationInstance)) { throw new DefinitionException("@Named without value may not be used on method parameter: " + method); } paramQualifiers.add(annotationInstance); } } Type type = resolveType(paramType, beanClass, method.declaringClass(), beanDeployment); injectionPoints.add(new InjectionPointInfo(type, transformer.applyTransformers(type, method, method.parameters().get(position), paramQualifiers), method, method.parameters().get(position), contains(paramAnnotations, DotNames.TRANSIENT_REFERENCE), contains(paramAnnotations, DotNames.DELEGATE))); } return injectionPoints; } static InjectionPointInfo fromSyntheticInjectionPoint(TypeAndQualifiers typeAndQualifiers) { return new InjectionPointInfo(typeAndQualifiers, InjectionPointKind.CDI, null, null, false, 
false); } private final TypeAndQualifiers typeAndQualifiers; private final AtomicReference<BeanInfo> resolvedBean; private final AtomicReference<BeanInfo> targetBean; private final InjectionPointKind kind; private final boolean hasDefaultQualifier; private final AnnotationTarget target; private final AnnotationTarget methodParameterTarget; private final int position; private final boolean isTransientReference; private final boolean isDelegate; InjectionPointInfo(Type requiredType, Set<AnnotationInstance> requiredQualifiers, AnnotationTarget target, AnnotationTarget methodParameterTarget, boolean isTransientReference, boolean isDelegate) { this(requiredType, requiredQualifiers, InjectionPointKind.CDI, target, methodParameterTarget, isTransientReference, isDelegate); } InjectionPointInfo(Type requiredType, Set<AnnotationInstance> requiredQualifiers, InjectionPointKind kind, AnnotationTarget target, AnnotationTarget methodParameterTarget, boolean isTransientReference, boolean isDelegate) { this(new TypeAndQualifiers(requiredType, requiredQualifiers.isEmpty() ? Collections.singleton(AnnotationInstance.create(DotNames.DEFAULT, null, Collections.emptyList())) : requiredQualifiers), kind, target, methodParameterTarget, isTransientReference, isDelegate); } InjectionPointInfo(TypeAndQualifiers typeAndQualifiers, InjectionPointKind kind, AnnotationTarget target, AnnotationTarget methodParameterTarget, boolean isTransientReference, boolean isDelegate) { this.typeAndQualifiers = typeAndQualifiers; this.resolvedBean = new AtomicReference<BeanInfo>(null); this.targetBean = new AtomicReference<BeanInfo>(null); this.kind = kind; this.hasDefaultQualifier = typeAndQualifiers.qualifiers.size() == 1 && typeAndQualifiers.qualifiers.iterator().next().name().equals(DotNames.DEFAULT); this.target = target; this.methodParameterTarget = methodParameterTarget; this.position = (methodParameterTarget == null || !Kind.METHOD_PARAMETER.equals(methodParameterTarget.kind())) ? -1 : methodParameterTarget.asMethodParameter().position(); this.isTransientReference = isTransientReference; this.isDelegate = isDelegate; if (DotNames.EVENT.equals(typeAndQualifiers.type.name()) && typeAndQualifiers.type.kind() == Type.Kind.CLASS) { throw new DefinitionException( "Event injection point can never be raw type - please specify the type parameter. Injection point: " + target); } } void resolve(BeanInfo bean) { resolvedBean.set(bean); } public BeanInfo getResolvedBean() { return resolvedBean.get(); } public Optional<BeanInfo> getTargetBean() { return Optional.ofNullable(targetBean.get()); } public void setTargetBean(BeanInfo bean) { this.targetBean.set(bean); } InjectionPointKind getKind() { return kind; } /** * Note that for programmatic lookup, the required type is the type parameter specified at the injection point. For example, * the required type for an injection point of type {@code Instance<org.acme.Foo>} is {@code org.acme.Foo}. 
* * @return the required type of this injection point */ public Type getRequiredType() { Type requiredType = typeAndQualifiers.type; if (isProgrammaticLookup() && requiredType.kind() == org.jboss.jandex.Type.Kind.PARAMETERIZED_TYPE) { requiredType = requiredType.asParameterizedType().arguments().get(0); } return requiredType; } /** * This method always returns the original type declared on the injection point, unlike {@link * * @return the type specified at the injection point */ public Type getType() { return typeAndQualifiers.type; } /** * @return <code>true</code> if this injection represents a dynamically obtained instance, <code>false</code> otherwise */ public boolean isProgrammaticLookup() { DotName requiredTypeName = typeAndQualifiers.type.name(); return DotNames.INSTANCE.equals(requiredTypeName) || DotNames.INJECTABLE_INSTANCE.equals(requiredTypeName) || DotNames.PROVIDER.equals(requiredTypeName); } public Set<AnnotationInstance> getRequiredQualifiers() { return typeAndQualifiers.qualifiers; } public AnnotationInstance getRequiredQualifier(DotName name) { for (AnnotationInstance qualifier : typeAndQualifiers.qualifiers) { if (qualifier.name().equals(name)) { return qualifier; } } return null; } public boolean hasDefaultedQualifier() { return hasDefaultQualifier; } TypeAndQualifiers getTypeAndQualifiers() { return typeAndQualifiers; } /** * This method is deprecated and will be removed at some point after Quarkus 3.15. * Use {@link * Both methods behave equally except for method parameter injection points where {@code getAnnotationTarget()} * returns method parameter as {@link AnnotationTarget} instead of the whole method. * <p> * For injected params, this method returns the corresponding method and not the param itself. * * @return the annotation target or {@code null} in case of synthetic injection point */ @Deprecated(forRemoval = true, since = "3.12") public AnnotationTarget getTarget() { return target; } /** * Unlike {@link * * @return the annotation target or {@code null} in case of synthetic injection point */ public AnnotationTarget getAnnotationTarget() { return methodParameterTarget == null ? target : methodParameterTarget; } public boolean isField() { return target != null && target.kind() == Kind.FIELD; } public boolean isParam() { return methodParameterTarget != null && methodParameterTarget.kind() == Kind.METHOD_PARAMETER; } public boolean isTransient() { return isField() && Modifier.isTransient(target.asField().flags()); } /** * * @return true if this injection point represents a method parameter annotated with {@code TransientReference} that * resolves to a dependent bean */ boolean isDependentTransientReference() { BeanInfo bean = getResolvedBean(); return bean != null && isParam() && BuiltinScope.DEPENDENT.is(bean.getScope()) && isTransientReference; } public boolean isTransientReference() { return isTransientReference; } public boolean isDelegate() { return isDelegate; } public boolean hasResolvedBean() { return resolvedBean.get() != null; } /** * @return the parameter position or {@code -1} for a field injection point or synthetic injection point */ public int getPosition() { return position; } /** * @return {@code true} if it represents a synthetic injection point, {@code false} otherwise */ public boolean isSynthetic() { return target == null; } /** * If an injection point resolves to a dependent bean that (A) injects the InjectionPoint metadata or (2) is synthetic, then * we need to wrap the injectable reference provider. 
* * @return {@code true} if a wrapper is needed, {@code false} otherwise */ boolean isCurrentInjectionPointWrapperNeeded() { BeanInfo bean = getResolvedBean(); if (bean != null && BuiltinScope.DEPENDENT.is(bean.getScope())) { return bean.isSynthetic() || bean.requiresInjectionPointMetadata(); } return false; } @Override public String toString() { return "InjectionPointInfo [requiredType=" + typeAndQualifiers.type + ", requiredQualifiers=" + typeAndQualifiers.qualifiers + "]"; } private static Type resolveType(Type type, ClassInfo beanClass, ClassInfo declaringClass, BeanDeployment beanDeployment) { if (type.kind() == Type.Kind.PRIMITIVE || type.kind() == Type.Kind.CLASS) { return type; } Map<ClassInfo, Map<String, Type>> resolvedTypeVariables = Types.resolvedTypeVariables(beanClass, beanDeployment); return resolveType(type, declaringClass, beanDeployment, resolvedTypeVariables); } private static Type resolveType(Type type, ClassInfo beanClass, BeanDeployment beanDeployment, Map<ClassInfo, Map<String, Type>> resolvedTypeVariables) { if (type.kind() == org.jboss.jandex.Type.Kind.TYPE_VARIABLE) { if (resolvedTypeVariables.containsKey(beanClass)) { return resolvedTypeVariables.get(beanClass).getOrDefault(type.asTypeVariable().identifier(), type); } } else if (type.kind() == org.jboss.jandex.Type.Kind.PARAMETERIZED_TYPE) { ParameterizedType parameterizedType = type.asParameterizedType(); Type[] typeParams = new Type[parameterizedType.arguments().size()]; for (int i = 0; i < typeParams.length; i++) { Type argument = parameterizedType.arguments().get(i); if (argument.kind() == org.jboss.jandex.Type.Kind.TYPE_VARIABLE || argument.kind() == org.jboss.jandex.Type.Kind.PARAMETERIZED_TYPE) { typeParams[i] = resolveType(argument, beanClass, beanDeployment, resolvedTypeVariables); } else { typeParams[i] = argument; } } return ParameterizedType.create(parameterizedType.name(), typeParams, parameterizedType.owner()); } else if (type.kind() == org.jboss.jandex.Type.Kind.ARRAY) { ArrayType arrayType = type.asArrayType(); Type component = arrayType.constituent(); if (component.kind() == org.jboss.jandex.Type.Kind.TYPE_VARIABLE || component.kind() == org.jboss.jandex.Type.Kind.PARAMETERIZED_TYPE) { component = resolveType(component, beanClass, beanDeployment, resolvedTypeVariables); } return ArrayType.create(component, type.asArrayType().dimensions()); } return type; } enum InjectionPointKind { CDI, RESOURCE } public static class TypeAndQualifiers { public final Type type; public final Set<AnnotationInstance> qualifiers; public TypeAndQualifiers(Type type, Set<AnnotationInstance> qualifiers) { this.type = type; this.qualifiers = qualifiers; } @Override public int hashCode() { final int prime = 31; int result = 1; result = prime * result + annotationSetHashCode(qualifiers); result = prime * result + ((type == null) ? 
0 : type.hashCode()); return result; } @Override public boolean equals(Object obj) { if (this == obj) { return true; } if (obj == null) { return false; } if (getClass() != obj.getClass()) { return false; } TypeAndQualifiers other = (TypeAndQualifiers) obj; if (qualifiers == null) { if (other.qualifiers != null) { return false; } } else if (!annotationSetEquals(qualifiers, other.qualifiers)) { return false; } if (type == null) { if (other.type != null) { return false; } } else if (!type.equals(other.type)) { return false; } return true; } private static boolean annotationSetEquals(Set<AnnotationInstance> s1, Set<AnnotationInstance> s2) { if (s1 == s2) { return true; } if (s1.size() != s2.size()) { return false; } for (AnnotationInstance a1 : s1) { for (AnnotationInstance a2 : s2) { if (!annotationEquals(a1, a2)) { return false; } } } return true; } private static boolean annotationEquals(AnnotationInstance a1, AnnotationInstance a2) { if (a1 == a2) { return true; } return a1.name().equals(a2.name()) && a1.values().equals(a2.values()); } private static int annotationSetHashCode(Set<AnnotationInstance> s) { int result = 1; for (AnnotationInstance a : s) { result = 31 * result + annotationHashCode(a); } return result; } private static int annotationHashCode(AnnotationInstance a) { int result = a.name().hashCode(); result = 31 * result + a.values().hashCode(); return result; } } }
Oh, that was because I don't use the consumer created in beforeTest() and instead create my own. I'll remove it; it's not necessary.
public void receiveUntilTimeoutMultipleTimes() throws IOException { this.consumer.close(); this.consumer = null; final int numberOfEvents = 15; final int numberOfEvents2 = 3; final String partitionId = "1"; final List<EventData> events = getEventsAsList(numberOfEvents, TestUtils.MESSAGE_TRACKING_ID); final List<EventData> events2 = getEventsAsList(numberOfEvents2, TestUtils.MESSAGE_TRACKING_ID); final EventHubConsumer consumer = client.createConsumer(DEFAULT_CONSUMER_GROUP_NAME, partitionId, EventPosition.fromEnqueuedTime(Instant.now())); final EventHubProducer producer = client.createProducer(new EventHubProducerOptions().setPartitionId(partitionId)); try { producer.send(events); final IterableStream<EventData> receive = consumer.receive(100, Duration.ofSeconds(3)); System.out.println("Sending second batch."); producer.send(events2); System.out.println("Receiving second batch."); final IterableStream<EventData> receive2 = consumer.receive(100, Duration.ofSeconds(5)); final List<EventData> asList = receive.stream().collect(Collectors.toList()); Assert.assertEquals(numberOfEvents, asList.size()); final List<EventData> asList2 = receive2.stream().collect(Collectors.toList()); Assert.assertEquals(numberOfEvents2, asList2.size()); } finally { dispose(consumer, producer); } }
this.consumer.close();
public void receiveUntilTimeoutMultipleTimes() { final int numberOfEvents = 15; final int numberOfEvents2 = 3; final String partitionId = "1"; final List<EventData> events = getEventsAsList(numberOfEvents); final List<EventData> events2 = getEventsAsList(numberOfEvents2); final EventHubConsumer consumer = client.createConsumer(DEFAULT_CONSUMER_GROUP_NAME, partitionId, EventPosition.fromEnqueuedTime(Instant.now())); final EventHubProducer producer = client.createProducer(new EventHubProducerOptions().setPartitionId(partitionId)); try { producer.send(events); final IterableStream<EventData> receive = consumer.receive(100, Duration.ofSeconds(3)); logger.info("Sending second batch."); producer.send(events2); logger.info("Receiving second batch."); final IterableStream<EventData> receive2 = consumer.receive(100, Duration.ofSeconds(5)); final List<EventData> asList = receive.stream().collect(Collectors.toList()); Assert.assertEquals(numberOfEvents, asList.size()); final List<EventData> asList2 = receive2.stream().collect(Collectors.toList()); Assert.assertEquals(numberOfEvents2, asList2.size()); } finally { dispose(consumer, producer); } }
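A short sketch of the synchronous receive-until-timeout pattern exercised by this test, assuming only the preview API visible in this row; the method name and partition id are illustrative, and the Event Hubs types (EventHubClient, EventHubConsumer, EventPosition, EventData, IterableStream) plus DEFAULT_CONSUMER_GROUP_NAME come from the same imports as the test class above.

import java.io.IOException;
import java.time.Duration;
import java.time.Instant;
import java.util.List;
import java.util.stream.Collectors;

// Blocks until up to 100 events arrive or the 5-second timeout elapses,
// whichever comes first, then closes the scoped consumer.
static List<EventData> receiveWithTimeout(EventHubClient client) throws IOException {
    EventHubConsumer consumer = client.createConsumer(DEFAULT_CONSUMER_GROUP_NAME, "1",
            EventPosition.fromEnqueuedTime(Instant.now()));
    try {
        IterableStream<EventData> received = consumer.receive(100, Duration.ofSeconds(5));
        return received.stream().collect(Collectors.toList());
    } finally {
        consumer.close();
    }
}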
class EventHubConsumerIntegrationTest extends IntegrationTestBase { private static final String PARTITION_ID = "0"; private static final int NUMBER_OF_EVENTS = 10; private static final AtomicBoolean HAS_PUSHED_EVENTS = new AtomicBoolean(); private static volatile IntegrationTestEventData testData = null; private EventHubClient client; private EventHubConsumer consumer; public EventHubConsumerIntegrationTest() { super(new ClientLogger(EventHubConsumerIntegrationTest.class)); } @Rule public TestName testName = new TestName(); @Override protected String getTestName() { return testName.getMethodName(); } @Override protected void beforeTest() { super.beforeTest(); client = new EventHubClientBuilder() .connectionString(getConnectionString()) .retry(RETRY_OPTIONS) .buildClient(); if (HAS_PUSHED_EVENTS.getAndSet(true)) { logger.info("Already pushed events to partition. Skipping."); } else { final EventHubProducerOptions options = new EventHubProducerOptions().setPartitionId(PARTITION_ID); testData = setupEventTestData(client, NUMBER_OF_EVENTS, options); } consumer = client.createConsumer(DEFAULT_CONSUMER_GROUP_NAME, PARTITION_ID, EventPosition.fromEnqueuedTime(testData.getEnqueuedTime())); } @Override protected void afterTest() { dispose(consumer, client); } /** * Verifies that we can receive events a single time that is up to the batch size. */ @Test public void receiveEvents() { final int numberOfEvents = 5; final IterableStream<EventData> actual = consumer.receive(numberOfEvents, Duration.ofSeconds(10)); final List<EventData> asList = actual.stream().collect(Collectors.toList()); Assert.assertEquals(numberOfEvents, asList.size()); } /** * Verifies that we can receive multiple times. */ @Test public void receiveEventsMultipleTimes() { final int numberOfEvents = 5; final int secondNumberOfEvents = 2; final Duration waitTime = Duration.ofSeconds(10); final IterableStream<EventData> actual = consumer.receive(numberOfEvents, waitTime); final IterableStream<EventData> actual2 = consumer.receive(secondNumberOfEvents, waitTime); final Map<Long, EventData> asList = actual.stream() .collect(Collectors.toMap(EventData::getSequenceNumber, Function.identity())); Assert.assertEquals(numberOfEvents, asList.size()); final Map<Long, EventData> asList2 = actual2.stream() .collect(Collectors.toMap(EventData::getSequenceNumber, Function.identity())); Assert.assertEquals(secondNumberOfEvents, asList2.size()); final Long maximumSequence = Collections.max(asList.keySet()); final Long minimumSequence = Collections.min(asList2.keySet()); Assert.assertTrue("The minimum in second receive should be less than first receive.", maximumSequence < minimumSequence); } /** * Verify that we can receive until the timeout. 
*/ @Test public void receiveUntilTimeout() { final int numberOfEvents = 15; final String partitionId = "1"; final List<EventData> events = getEventsAsList(numberOfEvents, TestUtils.MESSAGE_TRACKING_ID); final EventPosition position = EventPosition.fromEnqueuedTime(Instant.now()); final EventHubConsumer consumer = client.createConsumer(DEFAULT_CONSUMER_GROUP_NAME, partitionId, position); final EventHubProducer producer = client.createProducer(new EventHubProducerOptions().setPartitionId(partitionId)); try { producer.send(events); final IterableStream<EventData> receive = consumer.receive(100, Duration.ofSeconds(5)); final List<EventData> asList = receive.stream().collect(Collectors.toList()); Assert.assertEquals(numberOfEvents, asList.size()); } finally { dispose(producer, consumer); } } /** * Verify that we don't continue to fetch more events when there are no listeners. */ @Test public void doesNotContinueToReceiveEvents() { final int numberOfEvents = 15; final int secondSetOfEvents = 25; final int receiveNumber = 10; final String partitionId = "1"; final List<EventData> events = getEventsAsList(numberOfEvents, TestUtils.MESSAGE_TRACKING_ID); final List<EventData> events2 = getEventsAsList(secondSetOfEvents, TestUtils.MESSAGE_TRACKING_ID); final EventHubConsumer consumer = client.createConsumer(DEFAULT_CONSUMER_GROUP_NAME, partitionId, EventPosition.fromEnqueuedTime(Instant.now())); final EventHubProducer producer = client.createProducer(new EventHubProducerOptions().setPartitionId(partitionId)); try { producer.send(events); final IterableStream<EventData> receive = consumer.receive(receiveNumber, Duration.ofSeconds(5)); final List<EventData> asList = receive.stream().collect(Collectors.toList()); Assert.assertEquals(receiveNumber, asList.size()); producer.send(events2); } finally { dispose(consumer, producer); } } /** * Verify that we don't continue to fetch more events when there are no listeners. */ @Test public void multipleConsumers() { final int numberOfEvents = 15; final int receiveNumber = 10; final String partitionId = "1"; final List<EventData> events = getEventsAsList(numberOfEvents, TestUtils.MESSAGE_TRACKING_ID); final EventPosition position = EventPosition.fromEnqueuedTime(Instant.now()); final EventHubConsumer consumer = client.createConsumer(DEFAULT_CONSUMER_GROUP_NAME, partitionId, position); final EventHubConsumer consumer2 = client.createConsumer(DEFAULT_CONSUMER_GROUP_NAME, partitionId, position); final EventHubProducer producer = client.createProducer(new EventHubProducerOptions().setPartitionId(partitionId)); try { producer.send(events); final IterableStream<EventData> receive = consumer.receive(receiveNumber, Duration.ofSeconds(5)); final IterableStream<EventData> receive2 = consumer2.receive(receiveNumber, Duration.ofSeconds(5)); final List<Long> asList = receive.stream().map(EventData::getSequenceNumber).collect(Collectors.toList()); final List<Long> asList2 = receive2.stream().map(EventData::getSequenceNumber).collect(Collectors.toList()); Assert.assertEquals(receiveNumber, asList.size()); Assert.assertEquals(receiveNumber, asList2.size()); Collections.sort(asList); Collections.sort(asList2); final Long[] first = asList.toArray(new Long[0]); final Long[] second = asList2.toArray(new Long[0]); Assert.assertArrayEquals(first, second); } finally { dispose(consumer, producer); } } /** * Verify that we can receive until the timeout multiple times. 
*/ @Test private static List<EventData> getEventsAsList(int numberOfEvents, String messageId) { return TestUtils.getEvents(numberOfEvents, messageId).collectList().block(); } }
class EventHubConsumerIntegrationTest extends IntegrationTestBase { private static final String PARTITION_ID = "0"; private static final int NUMBER_OF_EVENTS = 10; private static final AtomicBoolean HAS_PUSHED_EVENTS = new AtomicBoolean(); private static volatile IntegrationTestEventData testData = null; private EventHubClient client; private EventHubConsumer consumer; public EventHubConsumerIntegrationTest() { super(new ClientLogger(EventHubConsumerIntegrationTest.class)); } @Rule public TestName testName = new TestName(); @Override protected String getTestName() { return testName.getMethodName(); } @Override protected void beforeTest() { super.beforeTest(); client = new EventHubClientBuilder() .connectionString(getConnectionString()) .scheduler(Schedulers.single()) .retry(RETRY_OPTIONS) .buildClient(); if (HAS_PUSHED_EVENTS.getAndSet(true)) { logger.info("Already pushed events to partition. Skipping."); } else { final EventHubProducerOptions options = new EventHubProducerOptions().setPartitionId(PARTITION_ID); testData = setupEventTestData(client, NUMBER_OF_EVENTS, options); } consumer = client.createConsumer(DEFAULT_CONSUMER_GROUP_NAME, PARTITION_ID, EventPosition.fromEnqueuedTime(testData.getEnqueuedTime())); } @Override protected void afterTest() { dispose(consumer, client); } /** * Verifies that we can receive events a single time that is up to the batch size. */ @Test public void receiveEvents() { final int numberOfEvents = 5; final IterableStream<EventData> actual = consumer.receive(numberOfEvents, Duration.ofSeconds(10)); final List<EventData> asList = actual.stream().collect(Collectors.toList()); Assert.assertEquals(numberOfEvents, asList.size()); } /** * Verifies that we can receive multiple times. */ @Test public void receiveEventsMultipleTimes() { final int numberOfEvents = 5; final int secondNumberOfEvents = 2; final Duration waitTime = Duration.ofSeconds(10); final IterableStream<EventData> actual = consumer.receive(numberOfEvents, waitTime); final IterableStream<EventData> actual2 = consumer.receive(secondNumberOfEvents, waitTime); final Map<Long, EventData> asList = actual.stream() .collect(Collectors.toMap(EventData::getSequenceNumber, Function.identity())); Assert.assertEquals(numberOfEvents, asList.size()); final Map<Long, EventData> asList2 = actual2.stream() .collect(Collectors.toMap(EventData::getSequenceNumber, Function.identity())); Assert.assertEquals(secondNumberOfEvents, asList2.size()); final Long maximumSequence = Collections.max(asList.keySet()); final Long minimumSequence = Collections.min(asList2.keySet()); Assert.assertTrue("The minimum in second receive should be less than first receive.", maximumSequence < minimumSequence); } /** * Verify that we can receive until the timeout. 
*/ @Test public void receiveUntilTimeout() { final int numberOfEvents = 15; final String partitionId = "1"; final List<EventData> events = getEventsAsList(numberOfEvents); final EventPosition position = EventPosition.fromEnqueuedTime(Instant.now()); final EventHubConsumer consumer = client.createConsumer(DEFAULT_CONSUMER_GROUP_NAME, partitionId, position); final EventHubProducer producer = client.createProducer(new EventHubProducerOptions().setPartitionId(partitionId)); try { producer.send(events); final IterableStream<EventData> receive = consumer.receive(100, Duration.ofSeconds(5)); final List<EventData> asList = receive.stream().collect(Collectors.toList()); Assert.assertEquals(numberOfEvents, asList.size()); } finally { dispose(producer, consumer); } } /** * Verify that we don't continue to fetch more events when there are no listeners. */ @Test public void doesNotContinueToReceiveEvents() { final int numberOfEvents = 15; final int secondSetOfEvents = 25; final int receiveNumber = 10; final String partitionId = "1"; final List<EventData> events = getEventsAsList(numberOfEvents); final List<EventData> events2 = getEventsAsList(secondSetOfEvents); final EventHubConsumer consumer = client.createConsumer(DEFAULT_CONSUMER_GROUP_NAME, partitionId, EventPosition.fromEnqueuedTime(Instant.now())); final EventHubProducer producer = client.createProducer(new EventHubProducerOptions().setPartitionId(partitionId)); try { producer.send(events); final IterableStream<EventData> receive = consumer.receive(receiveNumber, Duration.ofSeconds(5)); final List<EventData> asList = receive.stream().collect(Collectors.toList()); Assert.assertEquals(receiveNumber, asList.size()); producer.send(events2); } finally { dispose(consumer, producer); } } /** * Verify that we don't continue to fetch more events when there are no listeners. */ @Test public void multipleConsumers() { final int numberOfEvents = 15; final int receiveNumber = 10; final String partitionId = "1"; final List<EventData> events = getEventsAsList(numberOfEvents); final EventPosition position = EventPosition.fromEnqueuedTime(Instant.now()); final EventHubConsumer consumer = client.createConsumer(DEFAULT_CONSUMER_GROUP_NAME, partitionId, position); final EventHubConsumer consumer2 = client.createConsumer(DEFAULT_CONSUMER_GROUP_NAME, partitionId, position); final EventHubProducer producer = client.createProducer(new EventHubProducerOptions().setPartitionId(partitionId)); try { producer.send(events); final IterableStream<EventData> receive = consumer.receive(receiveNumber, Duration.ofSeconds(5)); final IterableStream<EventData> receive2 = consumer2.receive(receiveNumber, Duration.ofSeconds(5)); final List<Long> asList = receive.stream().map(EventData::getSequenceNumber).collect(Collectors.toList()); final List<Long> asList2 = receive2.stream().map(EventData::getSequenceNumber).collect(Collectors.toList()); Assert.assertEquals(receiveNumber, asList.size()); Assert.assertEquals(receiveNumber, asList2.size()); Collections.sort(asList); Collections.sort(asList2); final Long[] first = asList.toArray(new Long[0]); final Long[] second = asList2.toArray(new Long[0]); Assert.assertArrayEquals(first, second); } finally { dispose(consumer, producer); } } /** * Verify that we can receive until the timeout multiple times. */ @Test private static List<EventData> getEventsAsList(int numberOfEvents) { return TestUtils.getEvents(numberOfEvents, TestUtils.MESSAGE_TRACKING_ID).collectList().block(); } }
Yeah, recording the annotation processing flow and redirecting it to a different element was my preference as well, but I could not see a way to do this: in order to redirect the annotation processing we would need the Visitor instance from the generated accessor method so we could delegate to it. However, the visitor for the generated getter doesn't get created until later in the code flow, after we've processed all of the fields. So at this point it's a dead-end chicken-and-egg issue, because we cannot delegate to the getter method visitor until after we've scanned the field and determined we need to generate a getter in the first place. I'm pretty confident in the code and the test coverage (as long as the scope remains moving JAX-B annotations and not other annotations), so I could certainly add a `FIXME` to put myself on the hook for maintaining it in the future.
public FieldVisitor visitField(int access, String name, String descriptor, String signature, Object value) { FieldVisitor superVisitor = super.visitField(access, name, descriptor, signature, value); EntityField ef = fields.get(name); if (fields == null || ef == null) return superVisitor; ef.signature = signature; return new FieldVisitor(Opcodes.ASM7, superVisitor) { private Set<String> descriptors = new HashSet<>(); @Override public AnnotationVisitor visitAnnotation(String descriptor, boolean visible) { descriptors.add(descriptor); if (!descriptor.startsWith(JAXB_ANNOTATION_PREFIX)) { return super.visitAnnotation(descriptor, visible); } EntityFieldAnnotation efAnno = new EntityFieldAnnotation(descriptor); ef.annotations.add(efAnno); return new AnnotationVisitor(Opcodes.ASM7) { @Override public void visit(String name, Object value) { efAnno.name = name; efAnno.value = value; } }; } @Override public void visitEnd() { super.visitAnnotation(JAXB_TRANSIENT_SIGNATURE, true); super.visitEnd(); } }; }
return new AnnotationVisitor(Opcodes.ASM7) {
public FieldVisitor visitField(int access, String name, String descriptor, String signature, Object value) { FieldVisitor superVisitor = super.visitField(access, name, descriptor, signature, value); EntityField ef = fields.get(name); if (fields == null || ef == null) return superVisitor; ef.signature = signature; return new FieldVisitor(Opcodes.ASM7, superVisitor) { private Set<String> descriptors = new HashSet<>(); @Override public AnnotationVisitor visitAnnotation(String descriptor, boolean visible) { descriptors.add(descriptor); if (!descriptor.startsWith(JAXB_ANNOTATION_PREFIX)) { return super.visitAnnotation(descriptor, visible); } else { EntityFieldAnnotation efAnno = new EntityFieldAnnotation(descriptor); ef.annotations.add(efAnno); return new PanacheMovingAnnotationVisitor(efAnno); } } @Override public void visitEnd() { super.visitAnnotation(JAXB_TRANSIENT_SIGNATURE, true); super.visitEnd(); } }; }
class PanacheEntityClassVisitor<EntityFieldType extends EntityField> extends ClassVisitor { protected Type thisClass; protected Map<String, ? extends EntityFieldType> fields; private Set<String> methods = new HashSet<>(); private MetamodelInfo<?> modelInfo; private ClassInfo panacheEntityBaseClassInfo; public PanacheEntityClassVisitor(String className, ClassVisitor outputClassVisitor, MetamodelInfo<? extends EntityModel<? extends EntityFieldType>> modelInfo, ClassInfo panacheEntityBaseClassInfo) { super(Opcodes.ASM7, outputClassVisitor); thisClass = Type.getType("L" + className.replace('.', '/') + ";"); this.modelInfo = modelInfo; EntityModel<? extends EntityFieldType> entityModel = modelInfo.getEntityModel(className); fields = entityModel != null ? entityModel.fields : null; this.panacheEntityBaseClassInfo = panacheEntityBaseClassInfo; } @Override @Override public MethodVisitor visitMethod(int access, String methodName, String descriptor, String signature, String[] exceptions) { if (methodName.startsWith("get") || methodName.startsWith("set") || methodName.startsWith("is")) methods.add(methodName + "/" + descriptor); MethodVisitor superVisitor = super.visitMethod(access, methodName, descriptor, signature, exceptions); return new PanacheFieldAccessMethodVisitor(superVisitor, thisClass.getInternalName(), methodName, descriptor, modelInfo); } @Override public void visitEnd() { for (MethodInfo method : panacheEntityBaseClassInfo.methods()) { AnnotationInstance bridge = method.annotation(JandexUtil.DOTNAME_GENERATE_BRIDGE); if (bridge != null) generateMethod(method, bridge.value("targetReturnTypeErased")); } generateAccessors(); super.visitEnd(); } private void generateMethod(MethodInfo method, AnnotationValue targetReturnTypeErased) { String descriptor = JandexUtil.getDescriptor(method, name -> null); String signature = JandexUtil.getSignature(method, name -> null); List<org.jboss.jandex.Type> parameters = method.parameters(); String castTo = null; if (targetReturnTypeErased != null && targetReturnTypeErased.asBoolean()) { castTo = method.returnType().name().toString('/'); } MethodVisitor mv = super.visitMethod(Opcodes.ACC_PUBLIC | Opcodes.ACC_STATIC | Opcodes.ACC_SYNTHETIC, method.name(), descriptor, signature, null); for (int i = 0; i < parameters.size(); i++) { mv.visitParameter(method.parameterName(i), 0 /* modifiers */); } mv.visitCode(); injectModel(mv); for (int i = 0; i < parameters.size(); i++) { mv.visitIntInsn(Opcodes.ALOAD, i); } String forwardingDescriptor = "(" + getModelDescriptor() + descriptor.substring(1); if (castTo != null) { int lastParen = forwardingDescriptor.lastIndexOf(')'); forwardingDescriptor = forwardingDescriptor.substring(0, lastParen + 1) + "Ljava/lang/Object;"; } mv.visitMethodInsn(Opcodes.INVOKESTATIC, getPanacheOperationsBinaryName(), method.name(), forwardingDescriptor, false); if (castTo != null) mv.visitTypeInsn(Opcodes.CHECKCAST, castTo); String returnTypeDescriptor = descriptor.substring(descriptor.lastIndexOf(")") + 1); mv.visitInsn(JandexUtil.getReturnInstruction(returnTypeDescriptor)); mv.visitMaxs(0, 0); mv.visitEnd(); } protected abstract String getModelDescriptor(); protected abstract String getPanacheOperationsBinaryName(); protected abstract void injectModel(MethodVisitor mv); private void generateAccessors() { if (fields == null) return; for (EntityField field : fields.values()) { String getterName = field.getGetterName(); String getterDescriptor = "()" + field.descriptor; if (!methods.contains(getterName + "/" + getterDescriptor)) { 
MethodVisitor mv = super.visitMethod(Opcodes.ACC_PUBLIC | Opcodes.ACC_SYNTHETIC, getterName, getterDescriptor, field.signature == null ? null : "()" + field.signature, null); mv.visitCode(); mv.visitIntInsn(Opcodes.ALOAD, 0); generateAccessorGetField(mv, field); int returnCode = JandexUtil.getReturnInstruction(field.descriptor); mv.visitInsn(returnCode); mv.visitMaxs(0, 0); for (EntityFieldAnnotation anno : field.annotations) { AnnotationVisitor av = mv.visitAnnotation(anno.descriptor, true); if (anno.name != null) av.visit(anno.name, anno.value); av.visitEnd(); } mv.visitEnd(); } String setterName = field.getSetterName(); String setterDescriptor = "(" + field.descriptor + ")V"; if (!methods.contains(setterName + "/" + setterDescriptor)) { MethodVisitor mv = super.visitMethod(Opcodes.ACC_PUBLIC | Opcodes.ACC_SYNTHETIC, setterName, setterDescriptor, field.signature == null ? null : "(" + field.signature + ")V", null); mv.visitCode(); mv.visitIntInsn(Opcodes.ALOAD, 0); int loadCode; switch (field.descriptor) { case "Z": case "B": case "C": case "S": case "I": loadCode = Opcodes.ILOAD; break; case "J": loadCode = Opcodes.LLOAD; break; case "F": loadCode = Opcodes.FLOAD; break; case "D": loadCode = Opcodes.DLOAD; break; default: loadCode = Opcodes.ALOAD; break; } mv.visitIntInsn(loadCode, 1); generateAccessorSetField(mv, field); mv.visitInsn(Opcodes.RETURN); mv.visitMaxs(0, 0); mv.visitEnd(); } } } protected abstract void generateAccessorSetField(MethodVisitor mv, EntityField field); protected abstract void generateAccessorGetField(MethodVisitor mv, EntityField field); }
class PanacheEntityClassVisitor<EntityFieldType extends EntityField> extends ClassVisitor { protected Type thisClass; protected Map<String, ? extends EntityFieldType> fields; private Set<String> methods = new HashSet<>(); private MetamodelInfo<?> modelInfo; private ClassInfo panacheEntityBaseClassInfo; public PanacheEntityClassVisitor(String className, ClassVisitor outputClassVisitor, MetamodelInfo<? extends EntityModel<? extends EntityFieldType>> modelInfo, ClassInfo panacheEntityBaseClassInfo) { super(Opcodes.ASM7, outputClassVisitor); thisClass = Type.getType("L" + className.replace('.', '/') + ";"); this.modelInfo = modelInfo; EntityModel<? extends EntityFieldType> entityModel = modelInfo.getEntityModel(className); fields = entityModel != null ? entityModel.fields : null; this.panacheEntityBaseClassInfo = panacheEntityBaseClassInfo; } @Override @Override public MethodVisitor visitMethod(int access, String methodName, String descriptor, String signature, String[] exceptions) { if (methodName.startsWith("get") || methodName.startsWith("set") || methodName.startsWith("is")) methods.add(methodName + "/" + descriptor); MethodVisitor superVisitor = super.visitMethod(access, methodName, descriptor, signature, exceptions); return new PanacheFieldAccessMethodVisitor(superVisitor, thisClass.getInternalName(), methodName, descriptor, modelInfo); } @Override public void visitEnd() { for (MethodInfo method : panacheEntityBaseClassInfo.methods()) { AnnotationInstance bridge = method.annotation(JandexUtil.DOTNAME_GENERATE_BRIDGE); if (bridge != null) generateMethod(method, bridge.value("targetReturnTypeErased")); } generateAccessors(); super.visitEnd(); } private void generateMethod(MethodInfo method, AnnotationValue targetReturnTypeErased) { String descriptor = JandexUtil.getDescriptor(method, name -> null); String signature = JandexUtil.getSignature(method, name -> null); List<org.jboss.jandex.Type> parameters = method.parameters(); String castTo = null; if (targetReturnTypeErased != null && targetReturnTypeErased.asBoolean()) { castTo = method.returnType().name().toString('/'); } MethodVisitor mv = super.visitMethod(Opcodes.ACC_PUBLIC | Opcodes.ACC_STATIC | Opcodes.ACC_SYNTHETIC, method.name(), descriptor, signature, null); for (int i = 0; i < parameters.size(); i++) { mv.visitParameter(method.parameterName(i), 0 /* modifiers */); } mv.visitCode(); injectModel(mv); for (int i = 0; i < parameters.size(); i++) { mv.visitIntInsn(Opcodes.ALOAD, i); } String forwardingDescriptor = "(" + getModelDescriptor() + descriptor.substring(1); if (castTo != null) { int lastParen = forwardingDescriptor.lastIndexOf(')'); forwardingDescriptor = forwardingDescriptor.substring(0, lastParen + 1) + "Ljava/lang/Object;"; } mv.visitMethodInsn(Opcodes.INVOKESTATIC, getPanacheOperationsBinaryName(), method.name(), forwardingDescriptor, false); if (castTo != null) mv.visitTypeInsn(Opcodes.CHECKCAST, castTo); String returnTypeDescriptor = descriptor.substring(descriptor.lastIndexOf(")") + 1); mv.visitInsn(JandexUtil.getReturnInstruction(returnTypeDescriptor)); mv.visitMaxs(0, 0); mv.visitEnd(); } protected abstract String getModelDescriptor(); protected abstract String getPanacheOperationsBinaryName(); protected abstract void injectModel(MethodVisitor mv); private void generateAccessors() { if (fields == null) return; for (EntityField field : fields.values()) { String getterName = field.getGetterName(); String getterDescriptor = "()" + field.descriptor; if (!methods.contains(getterName + "/" + getterDescriptor)) { 
MethodVisitor mv = super.visitMethod(Opcodes.ACC_PUBLIC | Opcodes.ACC_SYNTHETIC, getterName, getterDescriptor, field.signature == null ? null : "()" + field.signature, null); mv.visitCode(); mv.visitIntInsn(Opcodes.ALOAD, 0); generateAccessorGetField(mv, field); int returnCode = JandexUtil.getReturnInstruction(field.descriptor); mv.visitInsn(returnCode); mv.visitMaxs(0, 0); for (EntityFieldAnnotation anno : field.annotations) { anno.writeToVisitor(mv); } mv.visitEnd(); } String setterName = field.getSetterName(); String setterDescriptor = "(" + field.descriptor + ")V"; if (!methods.contains(setterName + "/" + setterDescriptor)) { MethodVisitor mv = super.visitMethod(Opcodes.ACC_PUBLIC | Opcodes.ACC_SYNTHETIC, setterName, setterDescriptor, field.signature == null ? null : "(" + field.signature + ")V", null); mv.visitCode(); mv.visitIntInsn(Opcodes.ALOAD, 0); int loadCode; switch (field.descriptor) { case "Z": case "B": case "C": case "S": case "I": loadCode = Opcodes.ILOAD; break; case "J": loadCode = Opcodes.LLOAD; break; case "F": loadCode = Opcodes.FLOAD; break; case "D": loadCode = Opcodes.DLOAD; break; default: loadCode = Opcodes.ALOAD; break; } mv.visitIntInsn(loadCode, 1); generateAccessorSetField(mv, field); mv.visitInsn(Opcodes.RETURN); mv.visitMaxs(0, 0); mv.visitEnd(); } } } protected abstract void generateAccessorSetField(MethodVisitor mv, EntityField field); protected abstract void generateAccessorGetField(MethodVisitor mv, EntityField field); }
We will probably want to add configuration properties for these scanner flags, but I'll let our community add them if they need them.
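If anyone does pick that up, a runtime config group is the likely shape. A minimal sketch, assuming Quarkus' `@ConfigRoot`/`@ConfigItem` mechanism; the property names are assumptions about what the three `FileSystemScanner` booleans mean in the Flyway version in use, not an existing Quarkus API surface:

import io.quarkus.runtime.annotations.ConfigItem;
import io.quarkus.runtime.annotations.ConfigPhase;
import io.quarkus.runtime.annotations.ConfigRoot;

// Hypothetical config group feeding the FileSystemScanner flags below.
@ConfigRoot(name = "flyway.scanner", phase = ConfigPhase.RUN_TIME)
public class FlywayScannerRuntimeConfig {

    /** Whether migration resources are streamed instead of loaded eagerly (assumed flag). */
    @ConfigItem(defaultValue = "false")
    public boolean stream;

    /** Whether the scanner should try to detect file encodings (assumed flag). */
    @ConfigItem(defaultValue = "false")
    public boolean detectEncoding;

    /** Whether missing filesystem locations should fail hard (assumed flag). */
    @ConfigItem(defaultValue = "false")
    public boolean throwOnMissingLocations;
}

// Usage sketch: new FileSystemScanner(StandardCharsets.UTF_8,
//         config.stream, config.detectEncoding, config.throwOnMissingLocations);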
public QuarkusPathLocationScanner(Collection<Location> locations) { LOGGER.debugv("Locations: {0}", locations); this.scannedResources = new ArrayList<>(); ClassLoader classLoader = Thread.currentThread().getContextClassLoader(); FileSystemScanner fileSystemScanner = null; for (String migrationFile : applicationMigrationFiles) { if (isClassPathResource(locations, migrationFile)) { LOGGER.debugf("Loading %s", migrationFile); scannedResources.add(new ClassPathResource(null, migrationFile, classLoader, StandardCharsets.UTF_8)); } else if (migrationFile.startsWith(Location.FILESYSTEM_PREFIX)) { if (fileSystemScanner == null) { fileSystemScanner = new FileSystemScanner(StandardCharsets.UTF_8, false, false, false); } LOGGER.debugf("Checking %s for migration files", migrationFile); Collection<LoadableResource> resources = fileSystemScanner.scanForResources(new Location(migrationFile)); LOGGER.debugf("%s contains %d migration files", migrationFile, resources.size()); scannedResources.addAll(resources); } } }
fileSystemScanner = new FileSystemScanner(StandardCharsets.UTF_8, false, false, false);
public QuarkusPathLocationScanner(Collection<Location> locations) { LOGGER.debugv("Locations: {0}", locations); this.scannedResources = new ArrayList<>(); ClassLoader classLoader = Thread.currentThread().getContextClassLoader(); FileSystemScanner fileSystemScanner = null; for (String migrationFile : applicationMigrationFiles) { if (isClassPathResource(locations, migrationFile)) { LOGGER.debugf("Loading %s", migrationFile); scannedResources.add(new ClassPathResource(null, migrationFile, classLoader, StandardCharsets.UTF_8)); } else if (migrationFile.startsWith(Location.FILESYSTEM_PREFIX)) { if (fileSystemScanner == null) { fileSystemScanner = new FileSystemScanner(StandardCharsets.UTF_8, false, false, false); } LOGGER.debugf("Checking %s for migration files", migrationFile); Collection<LoadableResource> resources = fileSystemScanner.scanForResources(new Location(migrationFile)); LOGGER.debugf("%s contains %d migration files", migrationFile, resources.size()); scannedResources.addAll(resources); } } }
class QuarkusPathLocationScanner implements ResourceAndClassScanner { private static final Logger LOGGER = Logger.getLogger(QuarkusPathLocationScanner.class); private static final String LOCATION_SEPARATOR = "/"; private static Collection<String> applicationMigrationFiles = Collections.emptyList(); private static Collection<Class<? extends JavaMigration>> applicationMigrationClasses = Collections.emptyList(); private static Map<String, Collection<Callback>> applicationCallbackClasses = Collections.emptyMap(); private final Collection<LoadableResource> scannedResources; public static void setApplicationCallbackClasses(Map<String, Collection<Callback>> callbackClasses) { QuarkusPathLocationScanner.applicationCallbackClasses = callbackClasses; } public static Collection<Callback> callbacksForDataSource(String dsName) { return applicationCallbackClasses.getOrDefault(dsName, Collections.emptyList()); } /** * * @return The resources that were found. */ @Override public Collection<LoadableResource> scanForResources() { return scannedResources; } private boolean isClassPathResource(Collection<Location> locations, String migrationFile) { for (Location location : locations) { String locationPath = location.getPath(); if (!locationPath.endsWith(LOCATION_SEPARATOR)) { locationPath += "/"; } if (migrationFile.startsWith(locationPath)) { return true; } else { LOGGER.debugf("Migration file '%s' will be ignored because it does not start with '%s'", migrationFile, locationPath); } } return false; } /** * Scans the classpath for concrete classes under the specified package implementing this interface. * Non-instantiable abstract classes are filtered out. * * @return The non-abstract classes that were found. */ @Override public Collection<Class<? extends JavaMigration>> scanForClasses() { return applicationMigrationClasses; } public static void setApplicationMigrationFiles(Collection<String> applicationMigrationFiles) { QuarkusPathLocationScanner.applicationMigrationFiles = applicationMigrationFiles; } public static void setApplicationMigrationClasses(Collection<Class<? extends JavaMigration>> applicationMigrationClasses) { QuarkusPathLocationScanner.applicationMigrationClasses = applicationMigrationClasses; } }
class QuarkusPathLocationScanner implements ResourceAndClassScanner { private static final Logger LOGGER = Logger.getLogger(QuarkusPathLocationScanner.class); private static final String LOCATION_SEPARATOR = "/"; private static Collection<String> applicationMigrationFiles = Collections.emptyList(); private static Collection<Class<? extends JavaMigration>> applicationMigrationClasses = Collections.emptyList(); private static Map<String, Collection<Callback>> applicationCallbackClasses = Collections.emptyMap(); private final Collection<LoadableResource> scannedResources; public static void setApplicationCallbackClasses(Map<String, Collection<Callback>> callbackClasses) { QuarkusPathLocationScanner.applicationCallbackClasses = callbackClasses; } public static Collection<Callback> callbacksForDataSource(String dsName) { return applicationCallbackClasses.getOrDefault(dsName, Collections.emptyList()); } /** * * @return The resources that were found. */ @Override public Collection<LoadableResource> scanForResources() { return scannedResources; } private boolean isClassPathResource(Collection<Location> locations, String migrationFile) { for (Location location : locations) { String locationPath = location.getPath(); if (!locationPath.endsWith(LOCATION_SEPARATOR)) { locationPath += "/"; } if (migrationFile.startsWith(locationPath)) { return true; } else { LOGGER.debugf("Migration file '%s' will be ignored because it does not start with '%s'", migrationFile, locationPath); } } return false; } /** * Scans the classpath for concrete classes under the specified package implementing this interface. * Non-instantiable abstract classes are filtered out. * * @return The non-abstract classes that were found. */ @Override public Collection<Class<? extends JavaMigration>> scanForClasses() { return applicationMigrationClasses; } public static void setApplicationMigrationFiles(Collection<String> applicationMigrationFiles) { QuarkusPathLocationScanner.applicationMigrationFiles = applicationMigrationFiles; } public static void setApplicationMigrationClasses(Collection<Class<? extends JavaMigration>> applicationMigrationClasses) { QuarkusPathLocationScanner.applicationMigrationClasses = applicationMigrationClasses; } }
Not sure I understand how more than one node can be set to maintenance (as is the goal when allowing more than one group to be down at a time), if that is the case. This method checks both wanted state and actual state, so the name is a bit misleading.
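One way to resolve the naming complaint would be to split the wanted-state scan from the actual-state scan into self-describing helpers. A sketch, assuming `clusterInfo`, `NodeInfo`, `createDisallowed`, and `allowSettingOfWantedState` behave exactly as in the method below; the helpers are meant to live in the same class, and the distributor loop is omitted for brevity since it would mirror the storage one:

// Sketch: two self-describing helpers splitting the checks the current name conflates.
private Result checkNoStorageNodeWantsNonUpState() {
    for (NodeInfo node : clusterInfo.getStorageNodeInfos()) {
        State wanted = node.getUserWantedState().getState();
        if (wanted != UP && wanted != RETIRED) {
            return createDisallowed("Another storage node wants state "
                    + wanted.toString().toUpperCase() + ": " + node.getNodeIndex());
        }
    }
    return allowSettingOfWantedState();
}

private Result checkAllStorageNodesAreActuallyUp(ClusterState clusterState) {
    for (NodeInfo node : clusterInfo.getStorageNodeInfos()) {
        State state = clusterState.getNodeState(node.getNode()).getState();
        if (state != UP && state != RETIRED) {
            return createDisallowed("Another storage node has state "
                    + state.toString().toUpperCase() + ": " + node.getNodeIndex());
        }
    }
    return allowSettingOfWantedState();
}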
private Result checkAllNodesAreUp(ClusterState clusterState) { if (moreThanOneGroupAllowedToBeDown()) return allowSettingOfWantedState(); for (NodeInfo storageNodeInfo : clusterInfo.getStorageNodeInfos()) { State wantedState = storageNodeInfo.getUserWantedState().getState(); if (wantedState != UP && wantedState != RETIRED) { return createDisallowed("Another storage node wants state " + wantedState.toString().toUpperCase() + ": " + storageNodeInfo.getNodeIndex()); } State state = clusterState.getNodeState(storageNodeInfo.getNode()).getState(); if (state != UP && state != RETIRED) { return createDisallowed("Another storage node has state " + state.toString().toUpperCase() + ": " + storageNodeInfo.getNodeIndex()); } } for (NodeInfo distributorNodeInfo : clusterInfo.getDistributorNodeInfos()) { State wantedState = distributorNodeInfo.getUserWantedState().getState(); if (wantedState != UP && wantedState != RETIRED) { return createDisallowed("Another distributor wants state " + wantedState.toString().toUpperCase() + ": " + distributorNodeInfo.getNodeIndex()); } State state = clusterState.getNodeState(distributorNodeInfo.getNode()).getState(); if (state != UP && state != RETIRED) { return createDisallowed("Another distributor has state " + state.toString().toUpperCase() + ": " + distributorNodeInfo.getNodeIndex()); } } return allowSettingOfWantedState(); }
if (moreThanOneGroupAllowedToBeDown()) return allowSettingOfWantedState();
private Result checkAllNodesAreUp(ClusterState clusterState) { for (NodeInfo storageNodeInfo : clusterInfo.getStorageNodeInfos()) { State wantedState = storageNodeInfo.getUserWantedState().getState(); if (wantedState != UP && wantedState != RETIRED) { return createDisallowed("Another storage node wants state " + wantedState.toString().toUpperCase() + ": " + storageNodeInfo.getNodeIndex()); } State state = clusterState.getNodeState(storageNodeInfo.getNode()).getState(); if (state != UP && state != RETIRED) { return createDisallowed("Another storage node has state " + state.toString().toUpperCase() + ": " + storageNodeInfo.getNodeIndex()); } } for (NodeInfo distributorNodeInfo : clusterInfo.getDistributorNodeInfos()) { State wantedState = distributorNodeInfo.getUserWantedState().getState(); if (wantedState != UP && wantedState != RETIRED) { return createDisallowed("Another distributor wants state " + wantedState.toString().toUpperCase() + ": " + distributorNodeInfo.getNodeIndex()); } State state = clusterState.getNodeState(distributorNodeInfo.getNode()).getState(); if (state != UP && state != RETIRED) { return createDisallowed("Another distributor has state " + state.toString().toUpperCase() + ": " + distributorNodeInfo.getNodeIndex()); } } return allowSettingOfWantedState(); }
class Result { public enum Action { MUST_SET_WANTED_STATE, ALREADY_SET, DISALLOWED } private final Action action; private final String reason; private Result(Action action, String reason) { this.action = action; this.reason = reason; } public static Result createDisallowed(String reason) { return new Result(Action.DISALLOWED, reason); } public static Result allowSettingOfWantedState() { return new Result(Action.MUST_SET_WANTED_STATE, "Preconditions fulfilled and new state different"); } public static Result createAlreadySet() { return new Result(Action.ALREADY_SET, "Basic preconditions fulfilled and new state is already effective"); } public boolean settingWantedStateIsAllowed() { return action == Action.MUST_SET_WANTED_STATE; } public boolean wantedStateAlreadySet() { return action == Action.ALREADY_SET; } public String getReason() { return reason; } public String toString() { return "action " + action + ": " + reason; } }
class Result { public enum Action { MUST_SET_WANTED_STATE, ALREADY_SET, DISALLOWED } private final Action action; private final String reason; private Result(Action action, String reason) { this.action = action; this.reason = reason; } public static Result createDisallowed(String reason) { return new Result(Action.DISALLOWED, reason); } public static Result allowSettingOfWantedState() { return new Result(Action.MUST_SET_WANTED_STATE, "Preconditions fulfilled and new state different"); } public static Result createAlreadySet() { return new Result(Action.ALREADY_SET, "Basic preconditions fulfilled and new state is already effective"); } public boolean settingWantedStateIsAllowed() { return action == Action.MUST_SET_WANTED_STATE; } public boolean wantedStateAlreadySet() { return action == Action.ALREADY_SET; } public String getReason() { return reason; } public String toString() { return "action " + action + ": " + reason; } }
Will there be a README.md file? Please help add a more specific description of the usage of the user token, and of the Key Vault, MySQL flexible server, and network features.
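Until a README.md lands, a short usage note along these lines would cover the user-token question. The snippet reuses only wiring that already appears in the sample (the `UserTokenPolicy` class is the one defined further down); the comments about when the extra header is required are assumptions, not documented guarantees:

// Plain manager: enough for secret-based connections (e.g. Spring app -> SQL database).
TokenCredential credential = new DefaultAzureCredentialBuilder().build();
AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
ServiceLinkerManager linkers = ServiceLinkerManager.authenticate(credential, profile);

// Manager with the user-token policy: forwards the caller's ARM access token in the
// x-ms-serviceconnector-user-token header, assumed necessary when the target resource
// (e.g. Key Vault) must be configured with the end user's identity.
ServiceLinkerManager linkersWithUserToken = ServiceLinkerManager.configure()
        .withPolicy(new UserTokenPolicy(credential, profile.getEnvironment()))
        .authenticate(credential, profile);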
public static void main(String[] args) throws Exception { TokenCredential credential = new DefaultAzureCredentialBuilder() .authorityHost(AzureAuthorityHosts.AZURE_PUBLIC_CLOUD) .build(); AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE); HttpPipelinePolicy userTokenPolicy = (context, next) -> { Mono<String> token = null; String bearerTokenPrefix = "bearer "; String authorization = context.getHttpRequest().getHeaders().getValue("Authorization"); if (authorization != null && authorization.toLowerCase(Locale.ROOT).startsWith(bearerTokenPrefix)) { token = Mono.just(authorization.substring(bearerTokenPrefix.length())); } else { token = credential .getToken(new TokenRequestContext().addScopes(profile.getEnvironment().getResourceManagerEndpoint() + "/.default")) .map(AccessToken::getToken); } return token .flatMap(accessToken -> { context.getHttpRequest().getHeaders().set(USER_TOKEN_HEADER, accessToken); return next.process(); }); }; AzureResourceManager azureResourceManager = AzureResourceManager.authenticate(credential, profile).withDefaultSubscription(); ServiceLinkerManager serviceLinkerManager = ServiceLinkerManager.authenticate(credential, profile); ServiceLinkerManager serviceLinkerManagerWithUserToken = ServiceLinkerManager.configure().withPolicy(userTokenPolicy).authenticate(credential, profile); creatSpringCloudAndSQLConnection(azureResourceManager, serviceLinkerManager); createWebAppAndKeyVaultConnectionWithUserIdentity(azureResourceManager, serviceLinkerManagerWithUserToken); }
public static void main(String[] args) throws Exception { TokenCredential credential = new DefaultAzureCredentialBuilder() .authorityHost(AzureAuthorityHosts.AZURE_PUBLIC_CLOUD) .build(); AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE); HttpPipelinePolicy userTokenPolicy = new UserTokenPolicy(credential, profile.getEnvironment()); AzureResourceManager azureResourceManager = AzureResourceManager.authenticate(credential, profile).withDefaultSubscription(); ServiceLinkerManager serviceLinkerManager = ServiceLinkerManager.authenticate(credential, profile); ServiceLinkerManager serviceLinkerManagerWithUserToken = ServiceLinkerManager.configure().withPolicy(userTokenPolicy).authenticate(credential, profile); createSpringCloudAndSQLConnection(azureResourceManager, serviceLinkerManager); createWebAppAndKeyVaultConnectionWithUserIdentity(azureResourceManager, serviceLinkerManagerWithUserToken); }
class CreateServiceLinker { private static final String USER_TOKEN_HEADER = "x-ms-serviceconnector-user-token"; /** * Main entry point. * * @param args the parameters */ private static void creatSpringCloudAndSQLConnection(AzureResourceManager azureResourceManager, ServiceLinkerManager serviceLinkerManager) { String resourceGroupName = "rg" + randomString(8); Region region = Region.US_EAST; String springServiceName = "spring" + randomString(8); String springAppName = "app" + randomString(8); String sqlServerName = "sqlserver" + randomString(8); String sqlDatabaseName = "sqldb" + randomString(8); String sqlUserName = "sql" + randomString(8); String sqlPassword = "5$Ql" + randomString(8); SpringService springService = azureResourceManager.springServices().define(springServiceName) .withRegion(region) .withNewResourceGroup(resourceGroupName) .withSku(SkuName.B0) .create(); SpringApp springApp = springService.apps().define(springAppName) .withDefaultActiveDeployment() .create(); SqlServer sqlServer = azureResourceManager.sqlServers().define(sqlServerName) .withRegion(region) .withExistingResourceGroup(resourceGroupName) .withAdministratorLogin(sqlUserName) .withAdministratorPassword(sqlPassword) .create(); SqlDatabase sqlDatabase = sqlServer.databases().define(sqlDatabaseName) .withBasicEdition() .create(); LinkerResource linker = serviceLinkerManager.linkers().define("sql") .withExistingResourceUri(springApp.getActiveDeployment().id()) .withTargetService( new AzureResource() .withId(sqlDatabase.id()) ) .withAuthInfo( new SecretAuthInfo() .withName(sqlUserName) .withSecretInfo( new ValueSecretInfo() .withValue(sqlPassword) ) ) .withClientType(ClientType.SPRING_BOOT) .create(); System.out.println("Configurations:"); for (SourceConfiguration sourceConfiguration : linker.listConfigurations().configurations()) { System.out.printf("\t%s: %s%n", sourceConfiguration.name(), sourceConfiguration.value()); } } private static void createWebAppAndKeyVaultConnectionWithUserIdentity(AzureResourceManager azureResourceManager, ServiceLinkerManager serviceLinkerManager) { String resourceGroupName = "rg" + randomString(8); Region region = Region.US_EAST; String webAppName = "web" + randomString(8); String keyVaultName = "vault" + randomString(8); String identityName = "identity" + randomString(8); WebApp webApp = azureResourceManager.webApps().define(webAppName) .withRegion(region) .withNewResourceGroup(resourceGroupName) .withNewLinuxPlan(PricingTier.BASIC_B1) .withBuiltInImage(RuntimeStack.NODEJS_14_LTS) .create(); Vault vault = azureResourceManager.vaults().define(keyVaultName) .withRegion(region) .withExistingResourceGroup(resourceGroupName) .withEmptyAccessPolicy() .create(); Identity identity = azureResourceManager.identities().define(identityName) .withRegion(region) .withExistingResourceGroup(resourceGroupName) .create(); LinkerResource linker = serviceLinkerManager.linkers().define("keyvault") .withExistingResourceUri(webApp.id()) .withTargetService( new AzureResource() .withId(vault.id()) ) .withAuthInfo( new UserAssignedIdentityAuthInfo() .withSubscriptionId(azureResourceManager.subscriptionId()) .withClientId(identity.clientId()) ) .withClientType(ClientType.NODEJS) .create(); System.out.println("Configurations:"); for (SourceConfiguration sourceConfiguration : linker.listConfigurations().configurations()) { System.out.printf("\t%s: %s%n", sourceConfiguration.name(), sourceConfiguration.value()); } } private static String randomString(int length) { return UUID.randomUUID().toString().replace("-", 
"").substring(0, length); } }
class CreateServiceLinker { private static final String USER_TOKEN_HEADER = "x-ms-serviceconnector-user-token"; /** * Main entry point. * * @param args the parameters */ private static void createSpringCloudAndSQLConnection(AzureResourceManager azureResourceManager, ServiceLinkerManager serviceLinkerManager) { String resourceGroupName = "rg" + randomString(8); Region region = Region.US_EAST; String springServiceName = "spring" + randomString(8); String springAppName = "app" + randomString(8); String sqlServerName = "sqlserver" + randomString(8); String sqlDatabaseName = "sqldb" + randomString(8); String sqlUserName = "sql" + randomString(8); String sqlPassword = "5$Ql" + randomString(8); SpringService springService = azureResourceManager.springServices().define(springServiceName) .withRegion(region) .withNewResourceGroup(resourceGroupName) .withSku(SkuName.B0) .create(); SpringApp springApp = springService.apps().define(springAppName) .withDefaultActiveDeployment() .create(); SqlServer sqlServer = azureResourceManager.sqlServers().define(sqlServerName) .withRegion(region) .withExistingResourceGroup(resourceGroupName) .withAdministratorLogin(sqlUserName) .withAdministratorPassword(sqlPassword) .create(); SqlDatabase sqlDatabase = sqlServer.databases().define(sqlDatabaseName) .withBasicEdition() .create(); LinkerResource linker = serviceLinkerManager.linkers().define("sql") .withExistingResourceUri(springApp.getActiveDeployment().id()) .withTargetService( new AzureResource() .withId(sqlDatabase.id()) ) .withAuthInfo( new SecretAuthInfo() .withName(sqlUserName) .withSecretInfo( new ValueSecretInfo() .withValue(sqlPassword) ) ) .withClientType(ClientType.SPRING_BOOT) .create(); System.out.println("Configurations:"); for (SourceConfiguration sourceConfiguration : linker.listConfigurations().configurations()) { System.out.printf("\t%s: %s%n", sourceConfiguration.name(), sourceConfiguration.value()); } } private static void createWebAppAndKeyVaultConnectionWithUserIdentity(AzureResourceManager azureResourceManager, ServiceLinkerManager serviceLinkerManager) { String resourceGroupName = "rg" + randomString(8); Region region = Region.US_EAST; String webAppName = "web" + randomString(8); String keyVaultName = "vault" + randomString(8); String identityName = "identity" + randomString(8); WebApp webApp = azureResourceManager.webApps().define(webAppName) .withRegion(region) .withNewResourceGroup(resourceGroupName) .withNewLinuxPlan(PricingTier.BASIC_B1) .withBuiltInImage(RuntimeStack.NODEJS_14_LTS) .create(); Vault vault = azureResourceManager.vaults().define(keyVaultName) .withRegion(region) .withExistingResourceGroup(resourceGroupName) .withEmptyAccessPolicy() .create(); Identity identity = azureResourceManager.identities().define(identityName) .withRegion(region) .withExistingResourceGroup(resourceGroupName) .create(); LinkerResource linker = serviceLinkerManager.linkers().define("keyvault") .withExistingResourceUri(webApp.id()) .withTargetService( new AzureResource() .withId(vault.id()) ) .withAuthInfo( new UserAssignedIdentityAuthInfo() .withSubscriptionId(azureResourceManager.subscriptionId()) .withClientId(identity.clientId()) ) .withClientType(ClientType.NODEJS) .create(); System.out.println("Configurations:"); for (SourceConfiguration sourceConfiguration : linker.listConfigurations().configurations()) { System.out.printf("\t%s: %s%n", sourceConfiguration.name(), sourceConfiguration.value()); } } private static String randomString(int length) { return UUID.randomUUID().toString().replace("-", 
"").substring(0, length); } public static class UserTokenPolicy implements HttpPipelinePolicy { private final TokenCredential credential; private final AzureEnvironment environment; public UserTokenPolicy(TokenCredential credential, AzureEnvironment environment) { this.credential = credential; this.environment = environment; } @Override public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { Mono<String> token = null; String bearerTokenPrefix = "bearer "; String authorization = context.getHttpRequest().getHeaders().getValue("Authorization"); if (authorization != null && authorization.toLowerCase(Locale.ROOT).startsWith(bearerTokenPrefix)) { token = Mono.just(authorization.substring(bearerTokenPrefix.length())); } else { token = credential .getToken(new TokenRequestContext().addScopes(environment.getResourceManagerEndpoint() + "/.default")) .map(AccessToken::getToken); } return token .flatMap(accessToken -> { context.getHttpRequest().getHeaders().set(USER_TOKEN_HEADER, accessToken); return next.process(); }); } } }
Ah, URI#getPath returns a string.

> we can use artifactFilePath.getPath()

Sounds good 👍
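For the record, the reason the swap is safe: `org.apache.flink.core.fs.Path#getPath` returns the same path string that the URI round-trip yields for a plain local path. A minimal sketch, assuming a path with no scheme or authority component:

import org.apache.flink.core.fs.Path;

public class PathAccessorDemo {
    public static void main(String[] args) {
        Path artifactFilePath = new Path("/tmp/artifacts/model.bin");

        // URI#getPath returns a String, so both expressions have the same type...
        String viaUri = artifactFilePath.toUri().getPath(); // "/tmp/artifacts/model.bin"
        String direct = artifactFilePath.getPath();         // "/tmp/artifacts/model.bin"

        // ...and for a local path without scheme/authority they match.
        System.out.println(viaUri.equals(direct)); // expected: true
    }
}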
public CompletableFuture<JobID> submitJob(@Nonnull JobGraph jobGraph) { CompletableFuture<java.nio.file.Path> jobGraphFileFuture = CompletableFuture.supplyAsync( () -> { try { final java.nio.file.Path jobGraphFile = Files.createTempFile("flink-jobgraph", ".bin"); try (ObjectOutputStream objectOut = new ObjectOutputStream( Files.newOutputStream(jobGraphFile))) { objectOut.writeObject(jobGraph); } return jobGraphFile; } catch (IOException e) { throw new CompletionException( new FlinkException("Failed to serialize JobGraph.", e)); } }, executorService); CompletableFuture<Tuple2<JobSubmitRequestBody, Collection<FileUpload>>> requestFuture = jobGraphFileFuture.thenApply( jobGraphFile -> { List<String> jarFileNames = new ArrayList<>(8); List<JobSubmitRequestBody.DistributedCacheFile> artifactFileNames = new ArrayList<>(8); Collection<FileUpload> filesToUpload = new ArrayList<>(8); filesToUpload.add( new FileUpload( jobGraphFile, RestConstants.CONTENT_TYPE_BINARY)); for (Path jar : jobGraph.getUserJars()) { jarFileNames.add(jar.getName()); filesToUpload.add( new FileUpload( Paths.get(jar.toUri()), RestConstants.CONTENT_TYPE_JAR)); } for (Map.Entry<String, DistributedCache.DistributedCacheEntry> artifacts : jobGraph.getUserArtifacts().entrySet()) { final Path artifactFilePath = new Path(artifacts.getValue().filePath); try { if (!artifactFilePath.getFileSystem().isDistributedFS()) { artifactFileNames.add( new JobSubmitRequestBody.DistributedCacheFile( artifacts.getKey(), artifactFilePath.getName())); filesToUpload.add( new FileUpload( Paths.get( artifactFilePath.toUri().getPath()), RestConstants.CONTENT_TYPE_BINARY)); } } catch (IOException e) { throw new CompletionException( new FlinkException( "Failed to get the FileSystem of artifact " + artifactFilePath + ".", e)); } } final JobSubmitRequestBody requestBody = new JobSubmitRequestBody( jobGraphFile.getFileName().toString(), jarFileNames, artifactFileNames); return Tuple2.of( requestBody, Collections.unmodifiableCollection(filesToUpload)); }); final CompletableFuture<JobSubmitResponseBody> submissionFuture = requestFuture.thenCompose( requestAndFileUploads -> { LOG.info( "Submitting job '{}' ({}).", jobGraph.getName(), jobGraph.getJobID()); return sendRetriableRequest( JobSubmitHeaders.getInstance(), EmptyMessageParameters.getInstance(), requestAndFileUploads.f0, requestAndFileUploads.f1, isConnectionProblemOrServiceUnavailable(), (receiver, error) -> { if (error != null) { LOG.warn( "Attempt to submit job '{}' ({}) to '{}' has failed.", jobGraph.getName(), jobGraph.getJobID(), receiver, error); } else { LOG.info( "Successfully submitted job '{}' ({}) to '{}'.", jobGraph.getName(), jobGraph.getJobID(), receiver); } }); }); submissionFuture .thenCompose(ignored -> jobGraphFileFuture) .thenAccept( jobGraphFile -> { try { Files.delete(jobGraphFile); } catch (IOException e) { LOG.warn("Could not delete temporary file {}.", jobGraphFile, e); } }); return submissionFuture .thenApply(ignore -> jobGraph.getJobID()) .exceptionally( (Throwable throwable) -> { throw new CompletionException( new JobSubmissionException( jobGraph.getJobID(), "Failed to submit JobGraph.", ExceptionUtils.stripCompletionException(throwable))); }); }
artifactFilePath.toUri().getPath()),
public CompletableFuture<JobID> submitJob(@Nonnull JobGraph jobGraph) { CompletableFuture<java.nio.file.Path> jobGraphFileFuture = CompletableFuture.supplyAsync( () -> { try { final java.nio.file.Path jobGraphFile = Files.createTempFile("flink-jobgraph", ".bin"); try (ObjectOutputStream objectOut = new ObjectOutputStream( Files.newOutputStream(jobGraphFile))) { objectOut.writeObject(jobGraph); } return jobGraphFile; } catch (IOException e) { throw new CompletionException( new FlinkException("Failed to serialize JobGraph.", e)); } }, executorService); CompletableFuture<Tuple2<JobSubmitRequestBody, Collection<FileUpload>>> requestFuture = jobGraphFileFuture.thenApply( jobGraphFile -> { List<String> jarFileNames = new ArrayList<>(8); List<JobSubmitRequestBody.DistributedCacheFile> artifactFileNames = new ArrayList<>(8); Collection<FileUpload> filesToUpload = new ArrayList<>(8); filesToUpload.add( new FileUpload( jobGraphFile, RestConstants.CONTENT_TYPE_BINARY)); for (Path jar : jobGraph.getUserJars()) { jarFileNames.add(jar.getName()); filesToUpload.add( new FileUpload( Paths.get(jar.toUri()), RestConstants.CONTENT_TYPE_JAR)); } for (Map.Entry<String, DistributedCache.DistributedCacheEntry> artifacts : jobGraph.getUserArtifacts().entrySet()) { final Path artifactFilePath = new Path(artifacts.getValue().filePath); try { if (!artifactFilePath.getFileSystem().isDistributedFS()) { artifactFileNames.add( new JobSubmitRequestBody.DistributedCacheFile( artifacts.getKey(), artifactFilePath.getName())); filesToUpload.add( new FileUpload( Paths.get(artifactFilePath.getPath()), RestConstants.CONTENT_TYPE_BINARY)); } } catch (IOException e) { throw new CompletionException( new FlinkException( "Failed to get the FileSystem of artifact " + artifactFilePath + ".", e)); } } final JobSubmitRequestBody requestBody = new JobSubmitRequestBody( jobGraphFile.getFileName().toString(), jarFileNames, artifactFileNames); return Tuple2.of( requestBody, Collections.unmodifiableCollection(filesToUpload)); }); final CompletableFuture<JobSubmitResponseBody> submissionFuture = requestFuture.thenCompose( requestAndFileUploads -> { LOG.info( "Submitting job '{}' ({}).", jobGraph.getName(), jobGraph.getJobID()); return sendRetriableRequest( JobSubmitHeaders.getInstance(), EmptyMessageParameters.getInstance(), requestAndFileUploads.f0, requestAndFileUploads.f1, isConnectionProblemOrServiceUnavailable(), (receiver, error) -> { if (error != null) { LOG.warn( "Attempt to submit job '{}' ({}) to '{}' has failed.", jobGraph.getName(), jobGraph.getJobID(), receiver, error); } else { LOG.info( "Successfully submitted job '{}' ({}) to '{}'.", jobGraph.getName(), jobGraph.getJobID(), receiver); } }); }); submissionFuture .thenCompose(ignored -> jobGraphFileFuture) .thenAccept( jobGraphFile -> { try { Files.delete(jobGraphFile); } catch (IOException e) { LOG.warn("Could not delete temporary file {}.", jobGraphFile, e); } }); return submissionFuture .thenApply(ignore -> jobGraph.getJobID()) .exceptionally( (Throwable throwable) -> { throw new CompletionException( new JobSubmissionException( jobGraph.getJobID(), "Failed to submit JobGraph.", ExceptionUtils.stripCompletionException(throwable))); }); }
class RestClusterClient<T> implements ClusterClient<T> { private static final Logger LOG = LoggerFactory.getLogger(RestClusterClient.class); private final RestClusterClientConfiguration restClusterClientConfiguration; private final Configuration configuration; private final RestClient restClient; private final ExecutorService executorService = Executors.newFixedThreadPool( 4, new ExecutorThreadFactory("Flink-RestClusterClient-IO")); private final WaitStrategy waitStrategy; private final T clusterId; private final ClientHighAvailabilityServices clientHAServices; private final LeaderRetrievalService webMonitorRetrievalService; private final LeaderRetriever webMonitorLeaderRetriever = new LeaderRetriever(); private final AtomicBoolean running = new AtomicBoolean(true); /** ExecutorService to run operations that can be retried on exceptions. */ private final ScheduledExecutorService retryExecutorService; private final Predicate<Throwable> unknownJobStateRetryable = exception -> ExceptionUtils.findThrowable(exception, JobStateUnknownException.class) .isPresent(); public RestClusterClient(Configuration config, T clusterId) throws Exception { this(config, clusterId, DefaultClientHighAvailabilityServicesFactory.INSTANCE); } public RestClusterClient( Configuration config, T clusterId, ClientHighAvailabilityServicesFactory factory) throws Exception { this(config, null, clusterId, new ExponentialWaitStrategy(10L, 2000L), factory); } @VisibleForTesting RestClusterClient( Configuration configuration, @Nullable RestClient restClient, T clusterId, WaitStrategy waitStrategy) throws Exception { this( configuration, restClient, clusterId, waitStrategy, DefaultClientHighAvailabilityServicesFactory.INSTANCE); } private RestClusterClient( Configuration configuration, @Nullable RestClient restClient, T clusterId, WaitStrategy waitStrategy, ClientHighAvailabilityServicesFactory clientHAServicesFactory) throws Exception { this.configuration = checkNotNull(configuration); this.restClusterClientConfiguration = RestClusterClientConfiguration.fromConfiguration(configuration); if (restClient != null) { this.restClient = restClient; } else { this.restClient = new RestClient(configuration, executorService); } this.waitStrategy = checkNotNull(waitStrategy); this.clusterId = checkNotNull(clusterId); this.clientHAServices = clientHAServicesFactory.create( configuration, exception -> webMonitorLeaderRetriever.handleError( new FlinkException( "Fatal error happened with client HA " + "services.", exception))); this.webMonitorRetrievalService = clientHAServices.getClusterRestEndpointLeaderRetriever(); this.retryExecutorService = Executors.newSingleThreadScheduledExecutor( new ExecutorThreadFactory("Flink-RestClusterClient-Retry")); startLeaderRetrievers(); } private void startLeaderRetrievers() throws Exception { this.webMonitorRetrievalService.start(webMonitorLeaderRetriever); } @Override public Configuration getFlinkConfiguration() { return new Configuration(configuration); } @Override public void close() { if (running.compareAndSet(true, false)) { ExecutorUtils.gracefulShutdown( restClusterClientConfiguration.getRetryDelay(), TimeUnit.MILLISECONDS, retryExecutorService); this.restClient.shutdown(Time.seconds(5)); ExecutorUtils.gracefulShutdown(5, TimeUnit.SECONDS, this.executorService); try { webMonitorRetrievalService.stop(); } catch (Exception e) { LOG.error("An error occurred during stopping the WebMonitorRetrievalService", e); } try { clientHAServices.close(); } catch (Exception e) { LOG.error( "An error occurred during 
stopping the ClientHighAvailabilityServices", e); } } } /** * Requests the job details. * * @param jobId The job id * @return Job details */ public CompletableFuture<JobDetailsInfo> getJobDetails(JobID jobId) { final JobDetailsHeaders detailsHeaders = JobDetailsHeaders.getInstance(); final JobMessageParameters params = new JobMessageParameters(); params.jobPathParameter.resolve(jobId); return sendRequest(detailsHeaders, params); } @Override public CompletableFuture<JobStatus> getJobStatus(JobID jobId) { final CheckedSupplier<CompletableFuture<JobStatus>> operation = () -> requestJobStatus(jobId); return retry(operation, unknownJobStateRetryable); } /** * Requests the {@link JobResult} for the given {@link JobID}. The method retries multiple times * to poll the {@link JobResult} before giving up. * * @param jobId specifying the job for which to retrieve the {@link JobResult} * @return Future which is completed with the {@link JobResult} once the job has completed or * with a failure if the {@link JobResult} could not be retrieved. */ @Override public CompletableFuture<JobResult> requestJobResult(@Nonnull JobID jobId) { final CheckedSupplier<CompletableFuture<JobResult>> operation = () -> requestJobResultInternal(jobId); return retry(operation, unknownJobStateRetryable); } @Override @Override public CompletableFuture<Acknowledge> cancel(JobID jobID) { JobCancellationMessageParameters params = new JobCancellationMessageParameters() .resolveJobId(jobID) .resolveTerminationMode( TerminationModeQueryParameter.TerminationMode.CANCEL); CompletableFuture<EmptyResponseBody> responseFuture = sendRequest(JobCancellationHeaders.getInstance(), params); return responseFuture.thenApply(ignore -> Acknowledge.get()); } @Override public CompletableFuture<String> stopWithSavepoint( final JobID jobId, final boolean advanceToEndOfTime, @Nullable final String savepointDirectory) { final StopWithSavepointTriggerHeaders stopWithSavepointTriggerHeaders = StopWithSavepointTriggerHeaders.getInstance(); final SavepointTriggerMessageParameters stopWithSavepointTriggerMessageParameters = stopWithSavepointTriggerHeaders.getUnresolvedMessageParameters(); stopWithSavepointTriggerMessageParameters.jobID.resolve(jobId); final CompletableFuture<TriggerResponse> responseFuture = sendRequest( stopWithSavepointTriggerHeaders, stopWithSavepointTriggerMessageParameters, new StopWithSavepointRequestBody( savepointDirectory, advanceToEndOfTime, null)); return responseFuture .thenCompose( savepointTriggerResponseBody -> { final TriggerId savepointTriggerId = savepointTriggerResponseBody.getTriggerId(); return pollSavepointAsync(jobId, savepointTriggerId); }) .thenApply( savepointInfo -> { if (savepointInfo.getFailureCause() != null) { throw new CompletionException(savepointInfo.getFailureCause()); } return savepointInfo.getLocation(); }); } @Override public CompletableFuture<String> cancelWithSavepoint( JobID jobId, @Nullable String savepointDirectory) { return triggerSavepoint(jobId, savepointDirectory, true); } @Override public CompletableFuture<String> triggerSavepoint( final JobID jobId, final @Nullable String savepointDirectory) { return triggerSavepoint(jobId, savepointDirectory, false); } @Override public CompletableFuture<CoordinationResponse> sendCoordinationRequest( JobID jobId, OperatorID operatorId, CoordinationRequest request) { ClientCoordinationHeaders headers = ClientCoordinationHeaders.getInstance(); ClientCoordinationMessageParameters params = new ClientCoordinationMessageParameters(); 
params.jobPathParameter.resolve(jobId); params.operatorPathParameter.resolve(operatorId); SerializedValue<CoordinationRequest> serializedRequest; try { serializedRequest = new SerializedValue<>(request); } catch (IOException e) { return FutureUtils.completedExceptionally(e); } ClientCoordinationRequestBody requestBody = new ClientCoordinationRequestBody(serializedRequest); return sendRequest(headers, params, requestBody) .thenApply( responseBody -> { try { return responseBody .getSerializedCoordinationResponse() .deserializeValue(getClass().getClassLoader()); } catch (IOException | ClassNotFoundException e) { throw new CompletionException( "Failed to deserialize coordination response", e); } }); } private CompletableFuture<String> triggerSavepoint( final JobID jobId, final @Nullable String savepointDirectory, final boolean cancelJob) { final SavepointTriggerHeaders savepointTriggerHeaders = SavepointTriggerHeaders.getInstance(); final SavepointTriggerMessageParameters savepointTriggerMessageParameters = savepointTriggerHeaders.getUnresolvedMessageParameters(); savepointTriggerMessageParameters.jobID.resolve(jobId); final CompletableFuture<TriggerResponse> responseFuture = sendRequest( savepointTriggerHeaders, savepointTriggerMessageParameters, new SavepointTriggerRequestBody(savepointDirectory, cancelJob, null)); return responseFuture .thenCompose( savepointTriggerResponseBody -> { final TriggerId savepointTriggerId = savepointTriggerResponseBody.getTriggerId(); return pollSavepointAsync(jobId, savepointTriggerId); }) .thenApply( savepointInfo -> { if (savepointInfo.getFailureCause() != null) { throw new CompletionException(savepointInfo.getFailureCause()); } return savepointInfo.getLocation(); }); } @Override public CompletableFuture<Map<String, Object>> getAccumulators(JobID jobID, ClassLoader loader) { final JobAccumulatorsHeaders accumulatorsHeaders = JobAccumulatorsHeaders.getInstance(); final JobAccumulatorsMessageParameters accMsgParams = accumulatorsHeaders.getUnresolvedMessageParameters(); accMsgParams.jobPathParameter.resolve(jobID); accMsgParams.includeSerializedAccumulatorsParameter.resolve( Collections.singletonList(true)); CompletableFuture<JobAccumulatorsInfo> responseFuture = sendRequest(accumulatorsHeaders, accMsgParams); return responseFuture .thenApply(JobAccumulatorsInfo::getSerializedUserAccumulators) .thenApply( accumulators -> { try { return AccumulatorHelper.deserializeAndUnwrapAccumulators( accumulators, loader); } catch (Exception e) { throw new CompletionException( "Cannot deserialize and unwrap accumulators properly.", e); } }); } private CompletableFuture<SavepointInfo> pollSavepointAsync( final JobID jobId, final TriggerId triggerID) { return pollResourceAsync( () -> { final SavepointStatusHeaders savepointStatusHeaders = SavepointStatusHeaders.getInstance(); final SavepointStatusMessageParameters savepointStatusMessageParameters = savepointStatusHeaders.getUnresolvedMessageParameters(); savepointStatusMessageParameters.jobIdPathParameter.resolve(jobId); savepointStatusMessageParameters.triggerIdPathParameter.resolve(triggerID); return sendRequest(savepointStatusHeaders, savepointStatusMessageParameters); }); } @Override public CompletableFuture<Collection<JobStatusMessage>> listJobs() { return sendRequest(JobsOverviewHeaders.getInstance()) .thenApply( (multipleJobsDetails) -> multipleJobsDetails.getJobs().stream() .map( detail -> new JobStatusMessage( detail.getJobId(), detail.getJobName(), detail.getStatus(), detail.getStartTime())) 
.collect(Collectors.toList())); } @Override public T getClusterId() { return clusterId; } @Override public CompletableFuture<Acknowledge> disposeSavepoint(String savepointPath) { final SavepointDisposalRequest savepointDisposalRequest = new SavepointDisposalRequest(savepointPath); final CompletableFuture<TriggerResponse> savepointDisposalTriggerFuture = sendRequest( SavepointDisposalTriggerHeaders.getInstance(), savepointDisposalRequest); final CompletableFuture<AsynchronousOperationInfo> savepointDisposalFuture = savepointDisposalTriggerFuture.thenCompose( (TriggerResponse triggerResponse) -> { final TriggerId triggerId = triggerResponse.getTriggerId(); final SavepointDisposalStatusHeaders savepointDisposalStatusHeaders = SavepointDisposalStatusHeaders.getInstance(); final SavepointDisposalStatusMessageParameters savepointDisposalStatusMessageParameters = savepointDisposalStatusHeaders .getUnresolvedMessageParameters(); savepointDisposalStatusMessageParameters.triggerIdPathParameter.resolve( triggerId); return pollResourceAsync( () -> sendRequest( savepointDisposalStatusHeaders, savepointDisposalStatusMessageParameters)); }); return savepointDisposalFuture.thenApply( (AsynchronousOperationInfo asynchronousOperationInfo) -> { if (asynchronousOperationInfo.getFailureCause() == null) { return Acknowledge.get(); } else { throw new CompletionException(asynchronousOperationInfo.getFailureCause()); } }); } @Override public void shutDownCluster() { try { sendRequest(ShutdownHeaders.getInstance()).get(); } catch (InterruptedException e) { Thread.currentThread().interrupt(); } catch (ExecutionException e) { LOG.error("Error while shutting down cluster", e); } } /** * Creates a {@code CompletableFuture} that polls a {@code AsynchronouslyCreatedResource} until * its {@link AsynchronouslyCreatedResource * QueueStatus.Id * AsynchronouslyCreatedResource * * @param resourceFutureSupplier The operation which polls for the {@code * AsynchronouslyCreatedResource}. * @param <R> The type of the resource. * @param <A> The type of the {@code AsynchronouslyCreatedResource}. * @return A {@code CompletableFuture} delivering the resource. 
*/ private <R, A extends AsynchronouslyCreatedResource<R>> CompletableFuture<R> pollResourceAsync( final Supplier<CompletableFuture<A>> resourceFutureSupplier) { return pollResourceAsync(resourceFutureSupplier, new CompletableFuture<>(), 0); } private <R, A extends AsynchronouslyCreatedResource<R>> CompletableFuture<R> pollResourceAsync( final Supplier<CompletableFuture<A>> resourceFutureSupplier, final CompletableFuture<R> resultFuture, final long attempt) { resourceFutureSupplier .get() .whenComplete( (asynchronouslyCreatedResource, throwable) -> { if (throwable != null) { resultFuture.completeExceptionally(throwable); } else { if (asynchronouslyCreatedResource.queueStatus().getId() == QueueStatus.Id.COMPLETED) { resultFuture.complete(asynchronouslyCreatedResource.resource()); } else { retryExecutorService.schedule( () -> { pollResourceAsync( resourceFutureSupplier, resultFuture, attempt + 1); }, waitStrategy.sleepTime(attempt), TimeUnit.MILLISECONDS); } } }); return resultFuture; } @Override public String getWebInterfaceURL() { try { return getWebMonitorBaseUrl().get().toString(); } catch (InterruptedException | ExecutionException e) { ExceptionUtils.checkInterrupted(e); LOG.warn("Could not retrieve the web interface URL for the cluster.", e); return "Unknown address."; } } private CompletableFuture<JobStatus> requestJobStatus(JobID jobId) { return getJobDetails(jobId) .thenApply(JobDetailsInfo::getJobStatus) .thenApply( jobStatus -> { if (jobStatus == JobStatus.SUSPENDED) { throw new JobStateUnknownException( String.format("Job %s is in state SUSPENDED", jobId)); } return jobStatus; }); } private static class JobStateUnknownException extends RuntimeException { public JobStateUnknownException(String message) { super(message); } } private CompletableFuture<JobResult> requestJobResultInternal(@Nonnull JobID jobId) { return pollResourceAsync( () -> { final JobMessageParameters messageParameters = new JobMessageParameters(); messageParameters.jobPathParameter.resolve(jobId); return sendRequest( JobExecutionResultHeaders.getInstance(), messageParameters); }) .thenApply( jobResult -> { if (jobResult.getApplicationStatus() == ApplicationStatus.UNKNOWN) { throw new JobStateUnknownException( String.format("Result for Job %s is UNKNOWN", jobId)); } return jobResult; }); } private < M extends MessageHeaders<EmptyRequestBody, P, U>, U extends MessageParameters, P extends ResponseBody> CompletableFuture<P> sendRequest(M messageHeaders, U messageParameters) { return sendRequest(messageHeaders, messageParameters, EmptyRequestBody.getInstance()); } private < M extends MessageHeaders<R, P, EmptyMessageParameters>, R extends RequestBody, P extends ResponseBody> CompletableFuture<P> sendRequest(M messageHeaders, R request) { return sendRequest(messageHeaders, EmptyMessageParameters.getInstance(), request); } @VisibleForTesting <M extends MessageHeaders<EmptyRequestBody, P, EmptyMessageParameters>, P extends ResponseBody> CompletableFuture<P> sendRequest(M messageHeaders) { return sendRequest( messageHeaders, EmptyMessageParameters.getInstance(), EmptyRequestBody.getInstance()); } @VisibleForTesting public < M extends MessageHeaders<R, P, U>, U extends MessageParameters, R extends RequestBody, P extends ResponseBody> CompletableFuture<P> sendRequest(M messageHeaders, U messageParameters, R request) { return sendRetriableRequest( messageHeaders, messageParameters, request, isConnectionProblemOrServiceUnavailable()); } private < M extends MessageHeaders<R, P, U>, U extends MessageParameters, R extends 
RequestBody, P extends ResponseBody> CompletableFuture<P> sendRetriableRequest( M messageHeaders, U messageParameters, R request, Predicate<Throwable> retryPredicate) { return sendRetriableRequest( messageHeaders, messageParameters, request, Collections.emptyList(), retryPredicate, (receiver, error) -> { }); } private < M extends MessageHeaders<R, P, U>, U extends MessageParameters, R extends RequestBody, P extends ResponseBody> CompletableFuture<P> sendRetriableRequest( M messageHeaders, U messageParameters, R request, Collection<FileUpload> filesToUpload, Predicate<Throwable> retryPredicate, BiConsumer<String, Throwable> consumer) { return retry( () -> getWebMonitorBaseUrl() .thenCompose( webMonitorBaseUrl -> { try { final CompletableFuture<P> future = restClient.sendRequest( webMonitorBaseUrl.getHost(), webMonitorBaseUrl.getPort(), messageHeaders, messageParameters, request, filesToUpload); future.whenComplete( (result, error) -> consumer.accept( webMonitorBaseUrl .toString(), error)); return future; } catch (IOException e) { throw new CompletionException(e); } }), retryPredicate); } private <C> CompletableFuture<C> retry( CheckedSupplier<CompletableFuture<C>> operation, Predicate<Throwable> retryPredicate) { return FutureUtils.retryWithDelay( CheckedSupplier.unchecked(operation), restClusterClientConfiguration.getRetryMaxAttempts(), Time.milliseconds(restClusterClientConfiguration.getRetryDelay()), retryPredicate, new ScheduledExecutorServiceAdapter(retryExecutorService)); } private static Predicate<Throwable> isConnectionProblemOrServiceUnavailable() { return isConnectionProblemException().or(isServiceUnavailable()); } private static Predicate<Throwable> isConnectionProblemException() { return (throwable) -> ExceptionUtils.findThrowable(throwable, java.net.ConnectException.class).isPresent() || ExceptionUtils.findThrowable( throwable, java.net.SocketTimeoutException.class) .isPresent() || ExceptionUtils.findThrowable(throwable, ConnectTimeoutException.class) .isPresent() || ExceptionUtils.findThrowable(throwable, IOException.class).isPresent(); } private static Predicate<Throwable> isServiceUnavailable() { return httpExceptionCodePredicate( code -> code == HttpResponseStatus.SERVICE_UNAVAILABLE.code()); } private static Predicate<Throwable> httpExceptionCodePredicate( Predicate<Integer> statusCodePredicate) { return (throwable) -> ExceptionUtils.findThrowable(throwable, RestClientException.class) .map( restClientException -> { final int code = restClientException.getHttpResponseStatus().code(); return statusCodePredicate.test(code); }) .orElse(false); } @VisibleForTesting CompletableFuture<URL> getWebMonitorBaseUrl() { return FutureUtils.orTimeout( webMonitorLeaderRetriever.getLeaderFuture(), restClusterClientConfiguration.getAwaitLeaderTimeout(), TimeUnit.MILLISECONDS) .thenApplyAsync( leaderAddressSessionId -> { final String url = leaderAddressSessionId.f0; try { return new URL(url); } catch (MalformedURLException e) { throw new IllegalArgumentException( "Could not parse URL from " + url, e); } }, executorService); } }
class RestClusterClient<T> implements ClusterClient<T> { private static final Logger LOG = LoggerFactory.getLogger(RestClusterClient.class); private final RestClusterClientConfiguration restClusterClientConfiguration; private final Configuration configuration; private final RestClient restClient; private final ExecutorService executorService = Executors.newFixedThreadPool( 4, new ExecutorThreadFactory("Flink-RestClusterClient-IO")); private final WaitStrategy waitStrategy; private final T clusterId; private final ClientHighAvailabilityServices clientHAServices; private final LeaderRetrievalService webMonitorRetrievalService; private final LeaderRetriever webMonitorLeaderRetriever = new LeaderRetriever(); private final AtomicBoolean running = new AtomicBoolean(true); /** ExecutorService to run operations that can be retried on exceptions. */ private final ScheduledExecutorService retryExecutorService; private final Predicate<Throwable> unknownJobStateRetryable = exception -> ExceptionUtils.findThrowable(exception, JobStateUnknownException.class) .isPresent(); public RestClusterClient(Configuration config, T clusterId) throws Exception { this(config, clusterId, DefaultClientHighAvailabilityServicesFactory.INSTANCE); } public RestClusterClient( Configuration config, T clusterId, ClientHighAvailabilityServicesFactory factory) throws Exception { this(config, null, clusterId, new ExponentialWaitStrategy(10L, 2000L), factory); } @VisibleForTesting RestClusterClient( Configuration configuration, @Nullable RestClient restClient, T clusterId, WaitStrategy waitStrategy) throws Exception { this( configuration, restClient, clusterId, waitStrategy, DefaultClientHighAvailabilityServicesFactory.INSTANCE); } private RestClusterClient( Configuration configuration, @Nullable RestClient restClient, T clusterId, WaitStrategy waitStrategy, ClientHighAvailabilityServicesFactory clientHAServicesFactory) throws Exception { this.configuration = checkNotNull(configuration); this.restClusterClientConfiguration = RestClusterClientConfiguration.fromConfiguration(configuration); if (restClient != null) { this.restClient = restClient; } else { this.restClient = new RestClient(configuration, executorService); } this.waitStrategy = checkNotNull(waitStrategy); this.clusterId = checkNotNull(clusterId); this.clientHAServices = clientHAServicesFactory.create( configuration, exception -> webMonitorLeaderRetriever.handleError( new FlinkException( "Fatal error happened with client HA " + "services.", exception))); this.webMonitorRetrievalService = clientHAServices.getClusterRestEndpointLeaderRetriever(); this.retryExecutorService = Executors.newSingleThreadScheduledExecutor( new ExecutorThreadFactory("Flink-RestClusterClient-Retry")); startLeaderRetrievers(); } private void startLeaderRetrievers() throws Exception { this.webMonitorRetrievalService.start(webMonitorLeaderRetriever); } @Override public Configuration getFlinkConfiguration() { return new Configuration(configuration); } @Override public void close() { if (running.compareAndSet(true, false)) { ExecutorUtils.gracefulShutdown( restClusterClientConfiguration.getRetryDelay(), TimeUnit.MILLISECONDS, retryExecutorService); this.restClient.shutdown(Time.seconds(5)); ExecutorUtils.gracefulShutdown(5, TimeUnit.SECONDS, this.executorService); try { webMonitorRetrievalService.stop(); } catch (Exception e) { LOG.error("An error occurred during stopping the WebMonitorRetrievalService", e); } try { clientHAServices.close(); } catch (Exception e) { LOG.error( "An error occurred during 
stopping the ClientHighAvailabilityServices", e); } } } /** * Requests the job details. * * @param jobId The job id * @return Job details */ public CompletableFuture<JobDetailsInfo> getJobDetails(JobID jobId) { final JobDetailsHeaders detailsHeaders = JobDetailsHeaders.getInstance(); final JobMessageParameters params = new JobMessageParameters(); params.jobPathParameter.resolve(jobId); return sendRequest(detailsHeaders, params); } @Override public CompletableFuture<JobStatus> getJobStatus(JobID jobId) { final CheckedSupplier<CompletableFuture<JobStatus>> operation = () -> requestJobStatus(jobId); return retry(operation, unknownJobStateRetryable); } /** * Requests the {@link JobResult} for the given {@link JobID}. The method retries multiple times * to poll the {@link JobResult} before giving up. * * @param jobId specifying the job for which to retrieve the {@link JobResult} * @return Future which is completed with the {@link JobResult} once the job has completed or * with a failure if the {@link JobResult} could not be retrieved. */ @Override public CompletableFuture<JobResult> requestJobResult(@Nonnull JobID jobId) { final CheckedSupplier<CompletableFuture<JobResult>> operation = () -> requestJobResultInternal(jobId); return retry(operation, unknownJobStateRetryable); } @Override @Override public CompletableFuture<Acknowledge> cancel(JobID jobID) { JobCancellationMessageParameters params = new JobCancellationMessageParameters() .resolveJobId(jobID) .resolveTerminationMode( TerminationModeQueryParameter.TerminationMode.CANCEL); CompletableFuture<EmptyResponseBody> responseFuture = sendRequest(JobCancellationHeaders.getInstance(), params); return responseFuture.thenApply(ignore -> Acknowledge.get()); } @Override public CompletableFuture<String> stopWithSavepoint( final JobID jobId, final boolean advanceToEndOfTime, @Nullable final String savepointDirectory) { final StopWithSavepointTriggerHeaders stopWithSavepointTriggerHeaders = StopWithSavepointTriggerHeaders.getInstance(); final SavepointTriggerMessageParameters stopWithSavepointTriggerMessageParameters = stopWithSavepointTriggerHeaders.getUnresolvedMessageParameters(); stopWithSavepointTriggerMessageParameters.jobID.resolve(jobId); final CompletableFuture<TriggerResponse> responseFuture = sendRequest( stopWithSavepointTriggerHeaders, stopWithSavepointTriggerMessageParameters, new StopWithSavepointRequestBody( savepointDirectory, advanceToEndOfTime, null)); return responseFuture .thenCompose( savepointTriggerResponseBody -> { final TriggerId savepointTriggerId = savepointTriggerResponseBody.getTriggerId(); return pollSavepointAsync(jobId, savepointTriggerId); }) .thenApply( savepointInfo -> { if (savepointInfo.getFailureCause() != null) { throw new CompletionException(savepointInfo.getFailureCause()); } return savepointInfo.getLocation(); }); } @Override public CompletableFuture<String> cancelWithSavepoint( JobID jobId, @Nullable String savepointDirectory) { return triggerSavepoint(jobId, savepointDirectory, true); } @Override public CompletableFuture<String> triggerSavepoint( final JobID jobId, final @Nullable String savepointDirectory) { return triggerSavepoint(jobId, savepointDirectory, false); } @Override public CompletableFuture<CoordinationResponse> sendCoordinationRequest( JobID jobId, OperatorID operatorId, CoordinationRequest request) { ClientCoordinationHeaders headers = ClientCoordinationHeaders.getInstance(); ClientCoordinationMessageParameters params = new ClientCoordinationMessageParameters(); 
params.jobPathParameter.resolve(jobId); params.operatorPathParameter.resolve(operatorId); SerializedValue<CoordinationRequest> serializedRequest; try { serializedRequest = new SerializedValue<>(request); } catch (IOException e) { return FutureUtils.completedExceptionally(e); } ClientCoordinationRequestBody requestBody = new ClientCoordinationRequestBody(serializedRequest); return sendRequest(headers, params, requestBody) .thenApply( responseBody -> { try { return responseBody .getSerializedCoordinationResponse() .deserializeValue(getClass().getClassLoader()); } catch (IOException | ClassNotFoundException e) { throw new CompletionException( "Failed to deserialize coordination response", e); } }); } private CompletableFuture<String> triggerSavepoint( final JobID jobId, final @Nullable String savepointDirectory, final boolean cancelJob) { final SavepointTriggerHeaders savepointTriggerHeaders = SavepointTriggerHeaders.getInstance(); final SavepointTriggerMessageParameters savepointTriggerMessageParameters = savepointTriggerHeaders.getUnresolvedMessageParameters(); savepointTriggerMessageParameters.jobID.resolve(jobId); final CompletableFuture<TriggerResponse> responseFuture = sendRequest( savepointTriggerHeaders, savepointTriggerMessageParameters, new SavepointTriggerRequestBody(savepointDirectory, cancelJob, null)); return responseFuture .thenCompose( savepointTriggerResponseBody -> { final TriggerId savepointTriggerId = savepointTriggerResponseBody.getTriggerId(); return pollSavepointAsync(jobId, savepointTriggerId); }) .thenApply( savepointInfo -> { if (savepointInfo.getFailureCause() != null) { throw new CompletionException(savepointInfo.getFailureCause()); } return savepointInfo.getLocation(); }); } @Override public CompletableFuture<Map<String, Object>> getAccumulators(JobID jobID, ClassLoader loader) { final JobAccumulatorsHeaders accumulatorsHeaders = JobAccumulatorsHeaders.getInstance(); final JobAccumulatorsMessageParameters accMsgParams = accumulatorsHeaders.getUnresolvedMessageParameters(); accMsgParams.jobPathParameter.resolve(jobID); accMsgParams.includeSerializedAccumulatorsParameter.resolve( Collections.singletonList(true)); CompletableFuture<JobAccumulatorsInfo> responseFuture = sendRequest(accumulatorsHeaders, accMsgParams); return responseFuture .thenApply(JobAccumulatorsInfo::getSerializedUserAccumulators) .thenApply( accumulators -> { try { return AccumulatorHelper.deserializeAndUnwrapAccumulators( accumulators, loader); } catch (Exception e) { throw new CompletionException( "Cannot deserialize and unwrap accumulators properly.", e); } }); } private CompletableFuture<SavepointInfo> pollSavepointAsync( final JobID jobId, final TriggerId triggerID) { return pollResourceAsync( () -> { final SavepointStatusHeaders savepointStatusHeaders = SavepointStatusHeaders.getInstance(); final SavepointStatusMessageParameters savepointStatusMessageParameters = savepointStatusHeaders.getUnresolvedMessageParameters(); savepointStatusMessageParameters.jobIdPathParameter.resolve(jobId); savepointStatusMessageParameters.triggerIdPathParameter.resolve(triggerID); return sendRequest(savepointStatusHeaders, savepointStatusMessageParameters); }); } @Override public CompletableFuture<Collection<JobStatusMessage>> listJobs() { return sendRequest(JobsOverviewHeaders.getInstance()) .thenApply( (multipleJobsDetails) -> multipleJobsDetails.getJobs().stream() .map( detail -> new JobStatusMessage( detail.getJobId(), detail.getJobName(), detail.getStatus(), detail.getStartTime())) 
.collect(Collectors.toList())); } @Override public T getClusterId() { return clusterId; } @Override public CompletableFuture<Acknowledge> disposeSavepoint(String savepointPath) { final SavepointDisposalRequest savepointDisposalRequest = new SavepointDisposalRequest(savepointPath); final CompletableFuture<TriggerResponse> savepointDisposalTriggerFuture = sendRequest( SavepointDisposalTriggerHeaders.getInstance(), savepointDisposalRequest); final CompletableFuture<AsynchronousOperationInfo> savepointDisposalFuture = savepointDisposalTriggerFuture.thenCompose( (TriggerResponse triggerResponse) -> { final TriggerId triggerId = triggerResponse.getTriggerId(); final SavepointDisposalStatusHeaders savepointDisposalStatusHeaders = SavepointDisposalStatusHeaders.getInstance(); final SavepointDisposalStatusMessageParameters savepointDisposalStatusMessageParameters = savepointDisposalStatusHeaders .getUnresolvedMessageParameters(); savepointDisposalStatusMessageParameters.triggerIdPathParameter.resolve( triggerId); return pollResourceAsync( () -> sendRequest( savepointDisposalStatusHeaders, savepointDisposalStatusMessageParameters)); }); return savepointDisposalFuture.thenApply( (AsynchronousOperationInfo asynchronousOperationInfo) -> { if (asynchronousOperationInfo.getFailureCause() == null) { return Acknowledge.get(); } else { throw new CompletionException(asynchronousOperationInfo.getFailureCause()); } }); } @Override public void shutDownCluster() { try { sendRequest(ShutdownHeaders.getInstance()).get(); } catch (InterruptedException e) { Thread.currentThread().interrupt(); } catch (ExecutionException e) { LOG.error("Error while shutting down cluster", e); } } /** * Creates a {@code CompletableFuture} that polls a {@code AsynchronouslyCreatedResource} until * its {@link AsynchronouslyCreatedResource#queueStatus() QueueStatus.Id} is {@link QueueStatus.Id#COMPLETED}. The future completes with the result of {@link AsynchronouslyCreatedResource#resource()}. * * @param resourceFutureSupplier The operation which polls for the {@code * AsynchronouslyCreatedResource}. * @param <R> The type of the resource. * @param <A> The type of the {@code AsynchronouslyCreatedResource}. * @return A {@code CompletableFuture} delivering the resource.
*/ private <R, A extends AsynchronouslyCreatedResource<R>> CompletableFuture<R> pollResourceAsync( final Supplier<CompletableFuture<A>> resourceFutureSupplier) { return pollResourceAsync(resourceFutureSupplier, new CompletableFuture<>(), 0); } private <R, A extends AsynchronouslyCreatedResource<R>> CompletableFuture<R> pollResourceAsync( final Supplier<CompletableFuture<A>> resourceFutureSupplier, final CompletableFuture<R> resultFuture, final long attempt) { resourceFutureSupplier .get() .whenComplete( (asynchronouslyCreatedResource, throwable) -> { if (throwable != null) { resultFuture.completeExceptionally(throwable); } else { if (asynchronouslyCreatedResource.queueStatus().getId() == QueueStatus.Id.COMPLETED) { resultFuture.complete(asynchronouslyCreatedResource.resource()); } else { retryExecutorService.schedule( () -> { pollResourceAsync( resourceFutureSupplier, resultFuture, attempt + 1); }, waitStrategy.sleepTime(attempt), TimeUnit.MILLISECONDS); } } }); return resultFuture; } @Override public String getWebInterfaceURL() { try { return getWebMonitorBaseUrl().get().toString(); } catch (InterruptedException | ExecutionException e) { ExceptionUtils.checkInterrupted(e); LOG.warn("Could not retrieve the web interface URL for the cluster.", e); return "Unknown address."; } } private CompletableFuture<JobStatus> requestJobStatus(JobID jobId) { return getJobDetails(jobId) .thenApply(JobDetailsInfo::getJobStatus) .thenApply( jobStatus -> { if (jobStatus == JobStatus.SUSPENDED) { throw new JobStateUnknownException( String.format("Job %s is in state SUSPENDED", jobId)); } return jobStatus; }); } private static class JobStateUnknownException extends RuntimeException { public JobStateUnknownException(String message) { super(message); } } private CompletableFuture<JobResult> requestJobResultInternal(@Nonnull JobID jobId) { return pollResourceAsync( () -> { final JobMessageParameters messageParameters = new JobMessageParameters(); messageParameters.jobPathParameter.resolve(jobId); return sendRequest( JobExecutionResultHeaders.getInstance(), messageParameters); }) .thenApply( jobResult -> { if (jobResult.getApplicationStatus() == ApplicationStatus.UNKNOWN) { throw new JobStateUnknownException( String.format("Result for Job %s is UNKNOWN", jobId)); } return jobResult; }); } private < M extends MessageHeaders<EmptyRequestBody, P, U>, U extends MessageParameters, P extends ResponseBody> CompletableFuture<P> sendRequest(M messageHeaders, U messageParameters) { return sendRequest(messageHeaders, messageParameters, EmptyRequestBody.getInstance()); } private < M extends MessageHeaders<R, P, EmptyMessageParameters>, R extends RequestBody, P extends ResponseBody> CompletableFuture<P> sendRequest(M messageHeaders, R request) { return sendRequest(messageHeaders, EmptyMessageParameters.getInstance(), request); } @VisibleForTesting <M extends MessageHeaders<EmptyRequestBody, P, EmptyMessageParameters>, P extends ResponseBody> CompletableFuture<P> sendRequest(M messageHeaders) { return sendRequest( messageHeaders, EmptyMessageParameters.getInstance(), EmptyRequestBody.getInstance()); } @VisibleForTesting public < M extends MessageHeaders<R, P, U>, U extends MessageParameters, R extends RequestBody, P extends ResponseBody> CompletableFuture<P> sendRequest(M messageHeaders, U messageParameters, R request) { return sendRetriableRequest( messageHeaders, messageParameters, request, isConnectionProblemOrServiceUnavailable()); } private < M extends MessageHeaders<R, P, U>, U extends MessageParameters, R extends 
RequestBody, P extends ResponseBody> CompletableFuture<P> sendRetriableRequest( M messageHeaders, U messageParameters, R request, Predicate<Throwable> retryPredicate) { return sendRetriableRequest( messageHeaders, messageParameters, request, Collections.emptyList(), retryPredicate, (receiver, error) -> { }); } private < M extends MessageHeaders<R, P, U>, U extends MessageParameters, R extends RequestBody, P extends ResponseBody> CompletableFuture<P> sendRetriableRequest( M messageHeaders, U messageParameters, R request, Collection<FileUpload> filesToUpload, Predicate<Throwable> retryPredicate, BiConsumer<String, Throwable> consumer) { return retry( () -> getWebMonitorBaseUrl() .thenCompose( webMonitorBaseUrl -> { try { final CompletableFuture<P> future = restClient.sendRequest( webMonitorBaseUrl.getHost(), webMonitorBaseUrl.getPort(), messageHeaders, messageParameters, request, filesToUpload); future.whenComplete( (result, error) -> consumer.accept( webMonitorBaseUrl .toString(), error)); return future; } catch (IOException e) { throw new CompletionException(e); } }), retryPredicate); } private <C> CompletableFuture<C> retry( CheckedSupplier<CompletableFuture<C>> operation, Predicate<Throwable> retryPredicate) { return FutureUtils.retryWithDelay( CheckedSupplier.unchecked(operation), restClusterClientConfiguration.getRetryMaxAttempts(), Time.milliseconds(restClusterClientConfiguration.getRetryDelay()), retryPredicate, new ScheduledExecutorServiceAdapter(retryExecutorService)); } private static Predicate<Throwable> isConnectionProblemOrServiceUnavailable() { return isConnectionProblemException().or(isServiceUnavailable()); } private static Predicate<Throwable> isConnectionProblemException() { return (throwable) -> ExceptionUtils.findThrowable(throwable, java.net.ConnectException.class).isPresent() || ExceptionUtils.findThrowable( throwable, java.net.SocketTimeoutException.class) .isPresent() || ExceptionUtils.findThrowable(throwable, ConnectTimeoutException.class) .isPresent() || ExceptionUtils.findThrowable(throwable, IOException.class).isPresent(); } private static Predicate<Throwable> isServiceUnavailable() { return httpExceptionCodePredicate( code -> code == HttpResponseStatus.SERVICE_UNAVAILABLE.code()); } private static Predicate<Throwable> httpExceptionCodePredicate( Predicate<Integer> statusCodePredicate) { return (throwable) -> ExceptionUtils.findThrowable(throwable, RestClientException.class) .map( restClientException -> { final int code = restClientException.getHttpResponseStatus().code(); return statusCodePredicate.test(code); }) .orElse(false); } @VisibleForTesting CompletableFuture<URL> getWebMonitorBaseUrl() { return FutureUtils.orTimeout( webMonitorLeaderRetriever.getLeaderFuture(), restClusterClientConfiguration.getAwaitLeaderTimeout(), TimeUnit.MILLISECONDS) .thenApplyAsync( leaderAddressSessionId -> { final String url = leaderAddressSessionId.f0; try { return new URL(url); } catch (MalformedURLException e) { throw new IllegalArgumentException( "Could not parse URL from " + url, e); } }, executorService); } }
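The pollResourceAsync pair in the RestClusterClient listings above implements a recognizable pattern: re-issue an asynchronous request until the resource reports COMPLETED, scheduling each retry with a delay taken from the WaitStrategy. A minimal, self-contained sketch of that pattern follows; PollResult and its done flag are hypothetical stand-ins for Flink's AsynchronouslyCreatedResource and QueueStatus.Id.COMPLETED, and the capped-exponential sleepTime only assumes that ExponentialWaitStrategy(10L, 2000L) means "start at 10 ms, cap at 2000 ms".

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;

public class BackoffPoller {

    private final ScheduledExecutorService scheduler =
            Executors.newSingleThreadScheduledExecutor();

    // Capped exponential backoff: 10 ms, 20 ms, 40 ms, ... up to 2 s. Base and
    // cap mirror the (assumed) semantics of ExponentialWaitStrategy(10L, 2000L).
    long sleepTime(long attempt) {
        return Math.min(2000L, 10L << Math.min(attempt, 30));
    }

    <R> CompletableFuture<R> poll(Supplier<CompletableFuture<PollResult<R>>> operation) {
        CompletableFuture<R> result = new CompletableFuture<>();
        pollOnce(operation, result, 0);
        return result;
    }

    private <R> void pollOnce(Supplier<CompletableFuture<PollResult<R>>> operation,
                              CompletableFuture<R> result, long attempt) {
        operation.get().whenComplete((response, error) -> {
            if (error != null) {
                result.completeExceptionally(error); // the request itself failed: stop retrying
            } else if (response.done) {
                result.complete(response.value);     // resource is ready
            } else {
                scheduler.schedule(                  // not ready yet: try again after a delay
                        () -> pollOnce(operation, result, attempt + 1),
                        sleepTime(attempt), TimeUnit.MILLISECONDS);
            }
        });
    }

    // Stand-in for AsynchronouslyCreatedResource plus its QueueStatus.
    static final class PollResult<R> {
        final boolean done;
        final R value;
        PollResult(boolean done, R value) {
            this.done = done;
            this.value = value;
        }
    }
}

As in the original, the result future is completed exactly once, and a failure of the request itself short-circuits the retry loop rather than backing off again.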
> How do you know you are getting metrics from the pools and they reflect correct values? Atm I trust the Micrometer code to do that. Let me try and add something ...
public void testNettyEventExecutorMetrics() { testNettyMetrics(2L, NettyEventExecutorMetrics.class); }
testNettyMetrics(2L, NettyEventExecutorMetrics.class);
public void testNettyEventExecutorMetrics() { testNettyMetrics(2L, NettyEventExecutorMetrics.class); }
class NettyMetricsTest { @RegisterExtension static final QuarkusUnitTest config = new QuarkusUnitTest() .withConfigurationResource("test-logging.properties") .overrideConfigKey("quarkus.micrometer.binder-enabled-default", "false") .overrideConfigKey("quarkus.micrometer.binder.netty.enabled", "true") .overrideConfigKey("quarkus.redis.devservices.enabled", "false"); @Inject @Any Instance<MeterBinder> binders; private void testNettyMetrics(long expected, Class<? extends MeterBinder> mbClass) { Assertions.assertFalse(binders.isUnsatisfied()); long count = binders.stream() .filter(mbClass::isInstance) .count(); Assertions.assertEquals(expected, count); } @Test public void testNettyAllocatorMetrics() { testNettyMetrics(5L, NettyAllocatorMetrics.class); } @Test }
class NettyMetricsTest { @RegisterExtension static final QuarkusUnitTest config = new QuarkusUnitTest() .withApplicationRoot(jar -> jar.addClasses(HelloResource.class)) .withConfigurationResource("test-logging.properties") .overrideConfigKey("quarkus.micrometer.binder-enabled-default", "false") .overrideConfigKey("quarkus.micrometer.binder.netty.enabled", "true") .overrideConfigKey("quarkus.redis.devservices.enabled", "false"); @Inject @Any Instance<MeterBinder> binders; @Inject MeterRegistry registry; @Inject Vertx vertx; private static final Set<Tag> NAM_PBBA_TAGS = Tags.of( "id", String.valueOf(PooledByteBufAllocator.DEFAULT.hashCode()), "allocator.type", "PooledByteBufAllocator") .stream() .collect(Collectors.toSet()); private static final Set<Tag> NAM_UNPBBA_TAGS = Tags.of( "id", String.valueOf(UnpooledByteBufAllocator.DEFAULT.hashCode()), "allocator.type", "UnpooledByteBufAllocator") .stream() .collect(Collectors.toSet()); private static final Set<Tag> VX_NAM_PBBA_TAGS = Tags.of( "id", String.valueOf(VertxByteBufAllocator.POOLED_ALLOCATOR.hashCode()), "allocator.type", "PooledByteBufAllocator") .stream() .collect(Collectors.toSet()); private static final Set<Tag> VX_NAM_UNPBBA_TAGS = Tags.of( "id", String.valueOf(VertxByteBufAllocator.UNPOOLED_ALLOCATOR.hashCode()), "allocator.type", "UnpooledByteBufAllocator") .stream() .collect(Collectors.toSet()); private static final Tag HEAP_MEMORY = Tag.of(AllocatorMemoryKeyNames.MEMORY_TYPE.asString(), "heap"); private static final Tag DIRECT_MEMORY = Tag.of(AllocatorMemoryKeyNames.MEMORY_TYPE.asString(), "direct"); enum AllocatorKeyNames implements KeyName { ID { public String asString() { return "id"; } }, ALLOCATOR_TYPE { public String asString() { return "allocator.type"; } }; } enum AllocatorMemoryKeyNames implements KeyName { MEMORY_TYPE { public String asString() { return "memory.type"; } }; } private void testNettyMetrics(long expected, Class<? 
extends MeterBinder> mbClass) { Assertions.assertFalse(binders.isUnsatisfied()); long count = binders.stream() .filter(mbClass::isInstance) .count(); Assertions.assertEquals(expected, count); } private static Double getValue(List<Meter> meters, Set<Tag> expected) { for (Meter meter : meters) { List<Tag> tags = meter.getId().getTags(); if (tags.containsAll(expected)) { return meter.match(Gauge::value, null, null, null, null, null, null, null, null); } } return null; } private static Set<Tag> tags(Set<Tag> tags, Tag tag) { Set<Tag> newTags = new HashSet<>(tags); newTags.add(tag); return newTags; } private void testAllocatorMetricsValues(Set<Tag> tags) { List<Meter> meters = registry.getMeters(); Double heap0 = getValue(meters, tags(tags, HEAP_MEMORY)); Assertions.assertNotNull(heap0); Double direct0 = getValue(meters, tags(tags, DIRECT_MEMORY)); Assertions.assertNotNull(direct0); RestAssured.get("/hello/Netty").then().body(Matchers.equalTo("hello Netty")); Double heap1 = getValue(meters, tags(tags, HEAP_MEMORY)); Double direct1 = getValue(meters, tags(tags, DIRECT_MEMORY)); Assertions.assertTrue(heap0 <= heap1); Assertions.assertTrue(direct0 <= direct1); } @Test public void testNettyAllocatorMetrics() { testNettyMetrics(5L, NettyAllocatorMetrics.class); } @Test @Test public void testAllocatorMetricsValues() { testAllocatorMetricsValues(NAM_PBBA_TAGS); testAllocatorMetricsValues(NAM_UNPBBA_TAGS); testAllocatorMetricsValues(VX_NAM_PBBA_TAGS); testAllocatorMetricsValues(VX_NAM_UNPBBA_TAGS); } @Test @Timeout(60L) public void testEventExecutorMetricsValues() throws Exception { VertxInternal vi = (VertxInternal) vertx; assertEventGroup(vi.getEventLoopGroup()); assertEventGroup(vi.getAcceptorEventLoopGroup()); } private void assertEventGroup(EventLoopGroup group) throws Exception { int tasks = 0; for (EventExecutor ee : group) { tasks++; } final CyclicBarrier allPendingTasksAreIn = new CyclicBarrier(tasks + 1); CountDownLatch waitCollectingMeasures = new CountDownLatch(1); List<Future<Future<?>>> pendingTasksCompleted = new ArrayList<>(tasks); for (EventExecutor eventLoop : group) { pendingTasksCompleted.add(eventLoop.submit(() -> { try { Future<?> pendingTask = eventLoop.submit(() -> { }); allPendingTasksAreIn.await(); waitCollectingMeasures.await(); return pendingTask; } catch (Throwable ignore) { return null; } })); } allPendingTasksAreIn.await(); List<Meter> meters = registry.getMeters(); for (EventExecutor eventLoop : group) { checkMetrics(meters, eventLoop, 1); } waitCollectingMeasures.countDown(); for (Future<Future<?>> pendingTaskCompleted : pendingTasksCompleted) { pendingTaskCompleted.get().get(); } for (EventExecutor eventLoop : group) { checkMetrics(meters, eventLoop, 0); } } private void checkMetrics(List<Meter> meters, EventExecutor executor, int expected) { if (executor instanceof SingleThreadEventExecutor) { SingleThreadEventExecutor stee = (SingleThreadEventExecutor) executor; int pendingTasks = stee.pendingTasks(); Assertions.assertEquals(expected, pendingTasks); Tag tag = Tag.of("name", stee.threadProperties().name()); Set<Tag> tags = Set.of(tag); Double metricsValue = getValue(meters, tags); Assertions.assertNotNull(metricsValue); int mvInt = metricsValue.intValue(); Assertions.assertEquals(expected, mvInt); } } }
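The review question on this record ("how do you know the metrics reflect correct values?") is answered in context_after by driving real work through the instrumented component and comparing gauge readings before and after. Below is a minimal standalone sketch of the allocator half of that idea, assuming Micrometer's netty4 binder (1.11+) and that it registers the gauge under the name netty.allocator.memory.used with a memory.type tag, as the test's tag constants suggest.

import io.micrometer.core.instrument.MeterRegistry;
import io.micrometer.core.instrument.binder.netty4.NettyAllocatorMetrics;
import io.micrometer.core.instrument.simple.SimpleMeterRegistry;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.PooledByteBufAllocator;

public class AllocatorGaugeCheck {
    public static void main(String[] args) {
        MeterRegistry registry = new SimpleMeterRegistry();
        PooledByteBufAllocator allocator = PooledByteBufAllocator.DEFAULT;
        new NettyAllocatorMetrics(allocator).bindTo(registry);

        // Read the direct-memory gauge before and after a real allocation.
        double before = usedDirect(registry);
        ByteBuf buf = allocator.directBuffer(1024 * 1024);
        double after = usedDirect(registry);
        buf.release();

        // The pool can only have grown (or stayed flat) after allocating.
        if (after < before) {
            throw new AssertionError("gauge did not track the allocation");
        }
        System.out.printf("direct memory used: before=%.0f after=%.0f%n", before, after);
    }

    private static double usedDirect(MeterRegistry registry) {
        // "netty.allocator.memory.used" / "memory.type" match the tags the
        // test above asserts on; the gauge name is assumed from the binder.
        return registry.get("netty.allocator.memory.used")
                .tag("memory.type", "direct")
                .gauge()
                .value();
    }
}

The same before/after shape covers the event-executor half: park a task on each loop, read the pending-task gauges, release, and read again, which is what testEventExecutorMetricsValues does with its CyclicBarrier and CountDownLatch.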
@tuichenchuxin Please cast `projection` to `ColumnSegment`.
private ASTNode createProjection(final ProjectionContext ctx, final AliasSegment alias, final ASTNode projection) { if (projection instanceof AggregationProjectionSegment) { ((AggregationProjectionSegment) projection).setAlias(alias); return projection; } if (projection instanceof ExpressionProjectionSegment) { ((ExpressionProjectionSegment) projection).setAlias(alias); return projection; } if (projection instanceof CommonExpressionSegment) { CommonExpressionSegment segment = (CommonExpressionSegment) projection; ExpressionProjectionSegment result = new ExpressionProjectionSegment(segment.getStartIndex(), segment.getStopIndex(), segment.getText(), segment); result.setAlias(alias); return result; } if (projection instanceof ColumnSegment) { ExpressionProjectionSegment result = new ExpressionProjectionSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), ctx.getText(), (ExpressionSegment) projection); result.setAlias(alias); return result; } if (projection instanceof SubqueryExpressionSegment) { SubqueryExpressionSegment subqueryExpressionSegment = (SubqueryExpressionSegment) projection; String text = ctx.start.getInputStream().getText(new Interval(subqueryExpressionSegment.getStartIndex(), subqueryExpressionSegment.getStopIndex())); SubqueryProjectionSegment result = new SubqueryProjectionSegment(subqueryExpressionSegment.getSubquery(), text); result.setAlias(alias); return result; } if (projection instanceof BinaryOperationExpression) { int startIndex = ((BinaryOperationExpression) projection).getStartIndex(); int stopIndex = null != alias ? alias.getStopIndex() : ((BinaryOperationExpression) projection).getStopIndex(); ExpressionProjectionSegment result = new ExpressionProjectionSegment(startIndex, stopIndex, ((BinaryOperationExpression) projection).getText(), (ExpressionSegment) projection); result.setAlias(alias); return result; } if (projection instanceof ParameterMarkerExpressionSegment) { return projection; } LiteralExpressionSegment column = (LiteralExpressionSegment) projection; ExpressionProjectionSegment result = null == alias ? new ExpressionProjectionSegment(column.getStartIndex(), column.getStopIndex(), String.valueOf(column.getLiterals()), (ExpressionSegment) projection) : new ExpressionProjectionSegment(column.getStartIndex(), ctx.alias().stop.getStopIndex(), String.valueOf(column.getLiterals()), (ExpressionSegment) projection); result.setAlias(alias); return result; }
ExpressionProjectionSegment result = new ExpressionProjectionSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), ctx.getText(), (ExpressionSegment) projection);
private ASTNode createProjection(final ProjectionContext ctx, final AliasSegment alias, final ASTNode projection) { if (projection instanceof AggregationProjectionSegment) { ((AggregationProjectionSegment) projection).setAlias(alias); return projection; } if (projection instanceof ExpressionProjectionSegment) { ((ExpressionProjectionSegment) projection).setAlias(alias); return projection; } if (projection instanceof CommonExpressionSegment) { CommonExpressionSegment segment = (CommonExpressionSegment) projection; ExpressionProjectionSegment result = new ExpressionProjectionSegment(segment.getStartIndex(), segment.getStopIndex(), segment.getText(), segment); result.setAlias(alias); return result; } if (projection instanceof ColumnSegment) { ExpressionProjectionSegment result = new ExpressionProjectionSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), ctx.getText(), (ColumnSegment) projection); result.setAlias(alias); return result; } if (projection instanceof SubqueryExpressionSegment) { SubqueryExpressionSegment subqueryExpressionSegment = (SubqueryExpressionSegment) projection; String text = ctx.start.getInputStream().getText(new Interval(subqueryExpressionSegment.getStartIndex(), subqueryExpressionSegment.getStopIndex())); SubqueryProjectionSegment result = new SubqueryProjectionSegment(subqueryExpressionSegment.getSubquery(), text); result.setAlias(alias); return result; } if (projection instanceof BinaryOperationExpression) { int startIndex = ((BinaryOperationExpression) projection).getStartIndex(); int stopIndex = null != alias ? alias.getStopIndex() : ((BinaryOperationExpression) projection).getStopIndex(); ExpressionProjectionSegment result = new ExpressionProjectionSegment(startIndex, stopIndex, ((BinaryOperationExpression) projection).getText(), (BinaryOperationExpression) projection); result.setAlias(alias); return result; } if (projection instanceof ParameterMarkerExpressionSegment) { return projection; } LiteralExpressionSegment column = (LiteralExpressionSegment) projection; ExpressionProjectionSegment result = null == alias ? new ExpressionProjectionSegment(column.getStartIndex(), column.getStopIndex(), String.valueOf(column.getLiterals()), column) : new ExpressionProjectionSegment(column.getStartIndex(), ctx.alias().stop.getStopIndex(), String.valueOf(column.getLiterals()), column); result.setAlias(alias); return result; }
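For context on this diff: ColumnSegment, BinaryOperationExpression, and LiteralExpressionSegment all already satisfy the constructor's ExpressionSegment parameter, so both versions compile; the review asks for the cast to name the exact type the instanceof guard just proved. A hypothetical, self-contained illustration of why that is safer (the segment types below are stand-ins, not ShardingSphere's real hierarchy):

public class CastDemo {
    interface ExpressionSegment { }

    static final class ColumnSegment implements ExpressionSegment {
        final String name;
        ColumnSegment(String name) { this.name = name; }
    }

    static final class LiteralSegment implements ExpressionSegment {
        final Object value;
        LiteralSegment(Object value) { this.value = value; }
    }

    static String describe(Object projection) {
        if (projection instanceof ColumnSegment) {
            // (ColumnSegment) fails fast here if the guard above is ever
            // edited to admit another type; (ExpressionSegment) would not.
            ColumnSegment column = (ColumnSegment) projection;
            return "column " + column.name;
        }
        if (projection instanceof LiteralSegment) {
            LiteralSegment literal = (LiteralSegment) projection;
            return "literal " + literal.value;
        }
        return "other";
    }

    public static void main(String[] args) {
        System.out.println(describe(new ColumnSegment("order_id"))); // column order_id
        System.out.println(describe(new LiteralSegment(42)));        // literal 42
    }
}

With the concrete cast, a guard edited without its branch fails fast with a ClassCastException at the cast site instead of passing the wrong segment kind downstream.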
class MySQLStatementSQLVisitor extends MySQLStatementBaseVisitor<ASTNode> { private int currentParameterIndex; public MySQLStatementSQLVisitor(final Properties props) { } @Override public final ASTNode visitParameterMarker(final ParameterMarkerContext ctx) { return new ParameterMarkerValue(currentParameterIndex++); } @Override public final ASTNode visitLiterals(final LiteralsContext ctx) { if (null != ctx.stringLiterals()) { return visit(ctx.stringLiterals()); } if (null != ctx.numberLiterals()) { return visit(ctx.numberLiterals()); } if (null != ctx.temporalLiterals()) { return visit(ctx.temporalLiterals()); } if (null != ctx.hexadecimalLiterals()) { return visit(ctx.hexadecimalLiterals()); } if (null != ctx.bitValueLiterals()) { return visit(ctx.bitValueLiterals()); } if (null != ctx.booleanLiterals()) { return visit(ctx.booleanLiterals()); } if (null != ctx.nullValueLiterals()) { return visit(ctx.nullValueLiterals()); } throw new IllegalStateException("Literals must have string, number, dateTime, hex, bit, boolean or null."); } @Override public final ASTNode visitStringLiterals(final StringLiteralsContext ctx) { return new StringLiteralValue(ctx.getText()); } @Override public final ASTNode visitNumberLiterals(final NumberLiteralsContext ctx) { return new NumberLiteralValue(ctx.getText()); } @Override public ASTNode visitTemporalLiterals(final TemporalLiteralsContext ctx) { return new OtherLiteralValue(ctx.getText()); } @Override public final ASTNode visitHexadecimalLiterals(final HexadecimalLiteralsContext ctx) { return new OtherLiteralValue(ctx.getText()); } @Override public final ASTNode visitBitValueLiterals(final BitValueLiteralsContext ctx) { return new OtherLiteralValue(ctx.getText()); } @Override public final ASTNode visitBooleanLiterals(final BooleanLiteralsContext ctx) { return new BooleanLiteralValue(ctx.getText()); } @Override public final ASTNode visitNullValueLiterals(final NullValueLiteralsContext ctx) { return new OtherLiteralValue(ctx.getText()); } @Override public final ASTNode visitIdentifier(final IdentifierContext ctx) { return new IdentifierValue(ctx.getText()); } @Override public final ASTNode visitSchemaName(final SchemaNameContext ctx) { return visit(ctx.identifier()); } @Override public final ASTNode visitTableName(final TableNameContext ctx) { SimpleTableSegment result = new SimpleTableSegment(new TableNameSegment(ctx.name().getStart().getStartIndex(), ctx.name().getStop().getStopIndex(), new IdentifierValue(ctx.name().identifier().getText()))); OwnerContext owner = ctx.owner(); if (null != owner) { result.setOwner(new OwnerSegment(owner.getStart().getStartIndex(), owner.getStop().getStopIndex(), (IdentifierValue) visit(owner.identifier()))); } return result; } @Override public final ASTNode visitViewName(final ViewNameContext ctx) { SimpleTableSegment result = new SimpleTableSegment(new TableNameSegment(ctx.identifier().getStart().getStartIndex(), ctx.identifier().getStop().getStopIndex(), new IdentifierValue(ctx.identifier().getText()))); OwnerContext owner = ctx.owner(); if (null != owner) { result.setOwner(new OwnerSegment(owner.getStart().getStartIndex(), owner.getStop().getStopIndex(), (IdentifierValue) visit(owner.identifier()))); } return result; } @Override public final ASTNode visitColumnName(final ColumnNameContext ctx) { return new ColumnSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), (IdentifierValue) visit(ctx.identifier())); } @Override public final ASTNode visitIndexName(final IndexNameContext ctx) { return new 
IndexSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), (IdentifierValue) visit(ctx.identifier())); } @Override public ASTNode visitTableList(final TableListContext ctx) { CollectionValue<SimpleTableSegment> result = new CollectionValue<>(); for (TableNameContext each : ctx.tableName()) { result.getValue().add((SimpleTableSegment) visit(each)); } return result; } @Override public final ASTNode visitViewNames(final ViewNamesContext ctx) { CollectionValue<SimpleTableSegment> result = new CollectionValue<>(); for (ViewNameContext each : ctx.viewName()) { result.getValue().add((SimpleTableSegment) visit(each)); } return result; } @Override public final ASTNode visitColumnNames(final ColumnNamesContext ctx) { CollectionValue<ColumnSegment> result = new CollectionValue<>(); for (ColumnNameContext each : ctx.columnName()) { result.getValue().add((ColumnSegment) visit(each)); } return result; } @Override public final ASTNode visitExpr(final ExprContext ctx) { if (null != ctx.booleanPrimary()) { return visit(ctx.booleanPrimary()); } if (null != ctx.XOR()) { ExpressionSegment left = (ExpressionSegment) visit(ctx.expr(0)); ExpressionSegment right = (ExpressionSegment) visit(ctx.expr(1)); String operator = "XOR"; String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex())); return new BinaryOperationExpression(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), left, right, operator, text); } if (null != ctx.logicalOperator()) { String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex())); return new BinaryOperationExpression(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), (ExpressionSegment) visit(ctx.expr(0)), (ExpressionSegment) visit(ctx.expr(1)), ctx.logicalOperator().getText(), text); } return new NotExpression(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), (ExpressionSegment) visit(ctx.expr(0))); } @Override public final ASTNode visitBooleanPrimary(final BooleanPrimaryContext ctx) { if (null != ctx.IS()) { ExpressionSegment left = (ExpressionSegment) visit(ctx.booleanPrimary()); ExpressionSegment right = new LiteralExpressionSegment(ctx.IS().getSymbol().getStopIndex() + 1, ctx.stop.getStopIndex(), new Interval(ctx.IS().getSymbol().getStopIndex() + 1, ctx.stop.getStopIndex())); String operator = "IS"; String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex())); return new BinaryOperationExpression(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), left, right, operator, text); } if (null != ctx.comparisonOperator() || null != ctx.SAFE_EQ_()) { return createCompareSegment(ctx); } if (null != ctx.assignmentOperator()) { return createAssignmentSegment(ctx); } return visit(ctx.predicate()); } private ASTNode createAssignmentSegment(final BooleanPrimaryContext ctx) { ExpressionSegment left = (ExpressionSegment) visit(ctx.booleanPrimary()); ExpressionSegment right = (ExpressionSegment) visit(ctx.predicate()); String operator = ctx.assignmentOperator().getText(); String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex())); return new BinaryOperationExpression(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), left, right, operator, text); } private ASTNode createCompareSegment(final BooleanPrimaryContext ctx) { ExpressionSegment left = (ExpressionSegment) visit(ctx.booleanPrimary()); ExpressionSegment right; if (null != ctx.predicate()) { right = 
(ExpressionSegment) visit(ctx.predicate()); } else { right = new SubqueryExpressionSegment(new SubquerySegment(ctx.subquery().start.getStartIndex(), ctx.subquery().stop.getStopIndex(), (MySQLSelectStatement) visit(ctx.subquery()))); } String operator = null != ctx.SAFE_EQ_() ? ctx.SAFE_EQ_().getText() : ctx.comparisonOperator().getText(); String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex())); return new BinaryOperationExpression(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), left, right, operator, text); } @Override public final ASTNode visitPredicate(final PredicateContext ctx) { if (null != ctx.IN()) { return createInSegment(ctx); } if (null != ctx.BETWEEN()) { return createBetweenSegment(ctx); } if (null != ctx.LIKE()) { return createBinaryOperationExpressionFromLike(ctx); } if (null != ctx.REGEXP()) { return createBinaryOperationExpressionFromRegexp(ctx); } return visit(ctx.bitExpr(0)); } private InExpression createInSegment(final PredicateContext ctx) { boolean not = null != ctx.NOT(); ExpressionSegment left = (ExpressionSegment) visit(ctx.bitExpr(0)); ExpressionSegment right; if (null != ctx.subquery()) { right = new SubqueryExpressionSegment(new SubquerySegment(ctx.subquery().start.getStartIndex(), ctx.subquery().stop.getStopIndex(), (MySQLSelectStatement) visit(ctx.subquery()))); } else { right = new ListExpression(ctx.LP_().getSymbol().getStartIndex(), ctx.RP_().getSymbol().getStopIndex()); for (ExprContext each : ctx.expr()) { ((ListExpression) right).getItems().add((ExpressionSegment) visit(each)); } } return new InExpression(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), left, right, not); } private BinaryOperationExpression createBinaryOperationExpressionFromLike(final PredicateContext ctx) { ExpressionSegment left = (ExpressionSegment) visit(ctx.bitExpr(0)); String operator; ExpressionSegment right; if (null != ctx.SOUNDS()) { right = (ExpressionSegment) visit(ctx.bitExpr(1)); operator = "SOUNDS LIKE"; } else { ListExpression listExpression = new ListExpression(ctx.simpleExpr(0).start.getStartIndex(), ctx.simpleExpr().get(ctx.simpleExpr().size() - 1).stop.getStopIndex()); for (SimpleExprContext each : ctx.simpleExpr()) { listExpression.getItems().add((ExpressionSegment) visit(each)); } right = listExpression; operator = null != ctx.NOT() ? "NOT LIKE" : "LIKE"; } String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex())); return new BinaryOperationExpression(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), left, right, operator, text); } private BinaryOperationExpression createBinaryOperationExpressionFromRegexp(final PredicateContext ctx) { ExpressionSegment left = (ExpressionSegment) visit(ctx.bitExpr(0)); ExpressionSegment right = (ExpressionSegment) visit(ctx.bitExpr(1)); String operator = null != ctx.NOT() ? 
"NOT REGEXP" : "REGEXP"; String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex())); return new BinaryOperationExpression(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), left, right, operator, text); } private BetweenExpression createBetweenSegment(final PredicateContext ctx) { ExpressionSegment left = (ExpressionSegment) visit(ctx.bitExpr(0)); ExpressionSegment between = (ExpressionSegment) visit(ctx.bitExpr(1)); ExpressionSegment and = (ExpressionSegment) visit(ctx.predicate()); boolean not = null != ctx.NOT(); return new BetweenExpression(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), left, between, and, not); } @Override public final ASTNode visitBitExpr(final BitExprContext ctx) { if (null != ctx.simpleExpr()) { return visit(ctx.simpleExpr()); } ExpressionSegment left = (ExpressionSegment) visit(ctx.getChild(0)); ExpressionSegment right = (ExpressionSegment) visit(ctx.getChild(2)); String operator = ctx.getChild(1).getText(); String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex())); return new BinaryOperationExpression(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), left, right, operator, text); } @Override public final ASTNode visitSimpleExpr(final SimpleExprContext ctx) { int startIndex = ctx.start.getStartIndex(); int stopIndex = ctx.stop.getStopIndex(); if (null != ctx.subquery()) { SubquerySegment subquerySegment = new SubquerySegment(ctx.subquery().getStart().getStartIndex(), ctx.subquery().getStop().getStopIndex(), (MySQLSelectStatement) visit(ctx.subquery())); if (null != ctx.EXISTS()) { return new ExistsSubqueryExpression(startIndex, stopIndex, subquerySegment); } return new SubqueryExpressionSegment(subquerySegment); } if (null != ctx.parameterMarker()) { return new ParameterMarkerExpressionSegment(startIndex, stopIndex, ((ParameterMarkerValue) visit(ctx.parameterMarker())).getValue()); } if (null != ctx.literals()) { return SQLUtil.createLiteralExpression(visit(ctx.literals()), startIndex, stopIndex, ctx.literals().start.getInputStream().getText(new Interval(startIndex, stopIndex))); } if (null != ctx.intervalExpression()) { return visit(ctx.intervalExpression()); } if (null != ctx.functionCall()) { return visit(ctx.functionCall()); } if (null != ctx.columnRef()) { return visit(ctx.columnRef()); } if (null != ctx.matchExpression()) { return visit(ctx.matchExpression()); } if (null != ctx.notOperator()) { ASTNode expression = visit(ctx.simpleExpr(0)); if (expression instanceof ExistsSubqueryExpression) { ((ExistsSubqueryExpression) expression).setNot(true); return expression; } return new NotExpression(startIndex, stopIndex, (ExpressionSegment) expression); } if (null != ctx.LP_() && 1 == ctx.expr().size()) { return visit(ctx.expr(0)); } return visitRemainSimpleExpr(ctx); } @Override public ASTNode visitColumnRef(final ColumnRefContext ctx) { int identifierCount = ctx.identifier().size(); ColumnSegment result; if (1 == identifierCount) { result = new ColumnSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), (IdentifierValue) visit(ctx.identifier(0))); } else if (2 == identifierCount) { result = new ColumnSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), (IdentifierValue) visit(ctx.identifier(1))); result.setOwner(new OwnerSegment(ctx.identifier(0).start.getStartIndex(), ctx.identifier(0).stop.getStopIndex(), (IdentifierValue) visit(ctx.identifier(0)))); } else { result = new 
ColumnSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), (IdentifierValue) visit(ctx.identifier(2))); result.setOwner(new OwnerSegment(ctx.identifier(1).start.getStartIndex(), ctx.identifier(1).stop.getStopIndex(), (IdentifierValue) visit(ctx.identifier(1)))); } return result; } @Override public ASTNode visitSubquery(final SubqueryContext ctx) { return visit(ctx.queryExpressionParens()); } @Override public ASTNode visitQueryExpressionParens(final QueryExpressionParensContext ctx) { if (null != ctx.queryExpressionParens()) { return visit(ctx.queryExpressionParens()); } MySQLSelectStatement result = (MySQLSelectStatement) visit(ctx.queryExpression()); if (null != ctx.lockClauseList()) { result.setLock((LockSegment) visit(ctx.lockClauseList())); } result.setParameterCount(currentParameterIndex); return result; } @Override public ASTNode visitLockClauseList(final LockClauseListContext ctx) { LockSegment result = new LockSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex()); for (MySQLStatementParser.LockClauseContext each : ctx.lockClause()) { if (null != each.tableLockingList()) { result.getTables().addAll(generateTablesFromTableAliasRefList(each.tableLockingList().tableAliasRefList())); } } return result; } @Override public ASTNode visitQueryExpression(final QueryExpressionContext ctx) { MySQLSelectStatement result; if (null != ctx.queryExpressionBody()) { result = (MySQLSelectStatement) visit(ctx.queryExpressionBody()); } else { result = (MySQLSelectStatement) visit(ctx.queryExpressionParens()); } if (null != ctx.orderByClause()) { result.setOrderBy((OrderBySegment) visit(ctx.orderByClause())); } if (null != ctx.limitClause()) { result.setLimit((LimitSegment) visit(ctx.limitClause())); } return result; } @Override public ASTNode visitSelectWithInto(final SelectWithIntoContext ctx) { if (null != ctx.selectWithInto()) { return visit(ctx.selectWithInto()); } MySQLSelectStatement result = (MySQLSelectStatement) visit(ctx.queryExpression()); if (null != ctx.lockClauseList()) { result.setLock((LockSegment) visit(ctx.lockClauseList())); } return result; } @Override public ASTNode visitQueryExpressionBody(final QueryExpressionBodyContext ctx) { if (1 == ctx.getChildCount() && ctx.getChild(0) instanceof QueryPrimaryContext) { return visit(ctx.queryPrimary()); } throw new IllegalStateException("union select is not supported yet."); } @Override public ASTNode visitQuerySpecification(final QuerySpecificationContext ctx) { MySQLSelectStatement result = new MySQLSelectStatement(); result.setProjections((ProjectionsSegment) visit(ctx.projections())); if (null != ctx.selectSpecification()) { result.getProjections().setDistinctRow(isDistinct(ctx)); } if (null != ctx.fromClause() && null != ctx.fromClause().tableReferences()) { TableSegment tableSource = (TableSegment) visit(ctx.fromClause().tableReferences()); result.setFrom(tableSource); } if (null != ctx.whereClause()) { result.setWhere((WhereSegment) visit(ctx.whereClause())); } if (null != ctx.groupByClause()) { result.setGroupBy((GroupBySegment) visit(ctx.groupByClause())); } if (null != ctx.havingClause()) { result.setHaving((HavingSegment) visit(ctx.havingClause())); } if (null != ctx.windowClause()) { result.setWindow((WindowSegment) visit(ctx.windowClause())); } return result; } @Override public ASTNode visitWindowClause(final WindowClauseContext ctx) { return new WindowSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex()); } @Override public ASTNode visitHavingClause(final HavingClauseContext ctx) 
{ ExpressionSegment expr = (ExpressionSegment) visit(ctx.expr()); return new HavingSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), expr); } @Override public final ASTNode visitIntervalExpression(final IntervalExpressionContext ctx) { calculateParameterCount(Collections.singleton(ctx.intervalValue().expr())); return new ExpressionProjectionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), ctx.getText()); } @Override public final ASTNode visitFunctionCall(final FunctionCallContext ctx) { if (null != ctx.aggregationFunction()) { return visit(ctx.aggregationFunction()); } if (null != ctx.specialFunction()) { return visit(ctx.specialFunction()); } if (null != ctx.regularFunction()) { return visit(ctx.regularFunction()); } throw new IllegalStateException("FunctionCallContext must have aggregationFunction, regularFunction or specialFunction."); } @Override public final ASTNode visitAggregationFunction(final AggregationFunctionContext ctx) { String aggregationType = ctx.aggregationFunctionName().getText(); String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex())); return AggregationType.isAggregationType(aggregationType) ? createAggregationSegment(ctx, aggregationType) : new ExpressionProjectionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), text); } private ASTNode createAggregationSegment(final AggregationFunctionContext ctx, final String aggregationType) { AggregationType type = AggregationType.valueOf(aggregationType.toUpperCase()); String innerExpression = ctx.start.getInputStream().getText(new Interval(ctx.LP_().getSymbol().getStartIndex(), ctx.stop.getStopIndex())); if (null == ctx.distinct()) { return new AggregationProjectionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), type, innerExpression); } return new AggregationDistinctProjectionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), type, innerExpression, getDistinctExpression(ctx)); } private String getDistinctExpression(final AggregationFunctionContext ctx) { StringBuilder result = new StringBuilder(); for (int i = 3; i < ctx.getChildCount() - 1; i++) { result.append(ctx.getChild(i).getText()); } return result.toString(); } @Override public final ASTNode visitSpecialFunction(final SpecialFunctionContext ctx) { if (null != ctx.groupConcatFunction()) { return visit(ctx.groupConcatFunction()); } if (null != ctx.windowFunction()) { return visit(ctx.windowFunction()); } if (null != ctx.castFunction()) { return visit(ctx.castFunction()); } if (null != ctx.convertFunction()) { return visit(ctx.convertFunction()); } if (null != ctx.positionFunction()) { return visit(ctx.positionFunction()); } if (null != ctx.substringFunction()) { return visit(ctx.substringFunction()); } if (null != ctx.extractFunction()) { return visit(ctx.extractFunction()); } if (null != ctx.charFunction()) { return visit(ctx.charFunction()); } if (null != ctx.weightStringFunction()) { return visit(ctx.weightStringFunction()); } String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex())); return new ExpressionProjectionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), text); } @Override public final ASTNode visitGroupConcatFunction(final GroupConcatFunctionContext ctx) { calculateParameterCount(ctx.expr()); String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex())); return new 
ExpressionProjectionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), text); } @Override public final ASTNode visitWindowFunction(final WindowFunctionContext ctx) { super.visitWindowFunction(ctx); String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex())); return new ExpressionProjectionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), text); } @Override public final ASTNode visitCastFunction(final CastFunctionContext ctx) { calculateParameterCount(Collections.singleton(ctx.expr())); String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex())); return new ExpressionProjectionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), text); } @Override public final ASTNode visitConvertFunction(final ConvertFunctionContext ctx) { calculateParameterCount(Collections.singleton(ctx.expr())); String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex())); return new ExpressionProjectionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), text); } @Override public final ASTNode visitPositionFunction(final PositionFunctionContext ctx) { calculateParameterCount(ctx.expr()); String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex())); return new ExpressionProjectionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), text); } @Override public final ASTNode visitSubstringFunction(final SubstringFunctionContext ctx) { calculateParameterCount(Collections.singleton(ctx.expr())); String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex())); return new ExpressionProjectionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), text); } @Override public final ASTNode visitExtractFunction(final ExtractFunctionContext ctx) { calculateParameterCount(Collections.singleton(ctx.expr())); String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex())); return new ExpressionProjectionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), text); } @Override public final ASTNode visitCharFunction(final CharFunctionContext ctx) { calculateParameterCount(ctx.expr()); String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex())); return new ExpressionProjectionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), text); } @Override public final ASTNode visitWeightStringFunction(final WeightStringFunctionContext ctx) { calculateParameterCount(Collections.singleton(ctx.expr())); String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex())); return new ExpressionProjectionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), text); } @Override public final ASTNode visitRegularFunction(final RegularFunctionContext ctx) { if (null != ctx.completeRegularFunction()) { calculateParameterCount(ctx.completeRegularFunction().expr()); } String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex())); return new ExpressionProjectionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), text); } private ASTNode visitRemainSimpleExpr(final SimpleExprContext ctx) { if (null != ctx.caseExpression()) { 
visit(ctx.caseExpression()); String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex())); return new CommonExpressionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), text); } for (ExprContext each : ctx.expr()) { visit(each); } for (SimpleExprContext each : ctx.simpleExpr()) { visit(each); } String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex())); return new CommonExpressionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), text); } @Override public final ASTNode visitMatchExpression(final MatchExpressionContext ctx) { visit(ctx.expr()); String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex())); return new CommonExpressionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), text); } private void calculateParameterCount(final Collection<ExprContext> exprContexts) { for (ExprContext each : exprContexts) { visit(each); } } @Override public final ASTNode visitDataType(final DataTypeContext ctx) { DataTypeSegment result = new DataTypeSegment(); result.setDataTypeName(ctx.dataTypeName.getText()); result.setStartIndex(ctx.start.getStartIndex()); result.setStopIndex(ctx.stop.getStopIndex()); if (null != ctx.fieldLength()) { DataTypeLengthSegment dataTypeLengthSegment = (DataTypeLengthSegment) visit(ctx.fieldLength()); result.setDataLength(dataTypeLengthSegment); } if (null != ctx.precision()) { DataTypeLengthSegment dataTypeLengthSegment = (DataTypeLengthSegment) visit(ctx.precision()); result.setDataLength(dataTypeLengthSegment); } return result; } @Override public ASTNode visitFieldLength(final FieldLengthContext ctx) { DataTypeLengthSegment result = new DataTypeLengthSegment(); result.setStartIndex(ctx.start.getStartIndex()); result.setStopIndex(ctx.stop.getStartIndex()); result.setPrecision(Integer.parseInt(ctx.length.getText())); return result; } @Override public ASTNode visitPrecision(final PrecisionContext ctx) { DataTypeLengthSegment result = new DataTypeLengthSegment(); result.setStartIndex(ctx.start.getStartIndex()); result.setStopIndex(ctx.stop.getStartIndex()); List<TerminalNode> numbers = ctx.NUMBER_(); result.setPrecision(Integer.parseInt(numbers.get(0).getText())); result.setScale(Integer.parseInt(numbers.get(1).getText())); return result; } @Override public final ASTNode visitOrderByClause(final OrderByClauseContext ctx) { Collection<OrderByItemSegment> items = new LinkedList<>(); for (OrderByItemContext each : ctx.orderByItem()) { items.add((OrderByItemSegment) visit(each)); } return new OrderBySegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), items); } @Override public final ASTNode visitOrderByItem(final OrderByItemContext ctx) { OrderDirection orderDirection; if (null != ctx.direction()) { orderDirection = null != ctx.direction().DESC() ? 
OrderDirection.DESC : OrderDirection.ASC; } else { orderDirection = OrderDirection.ASC; } if (null != ctx.numberLiterals()) { return new IndexOrderByItemSegment(ctx.numberLiterals().getStart().getStartIndex(), ctx.numberLiterals().getStop().getStopIndex(), SQLUtil.getExactlyNumber(ctx.numberLiterals().getText(), 10).intValue(), orderDirection); } else { ASTNode expr = visitExpr(ctx.expr()); if (expr instanceof ColumnSegment) { return new ColumnOrderByItemSegment((ColumnSegment) expr, orderDirection); } else { return new ExpressionOrderByItemSegment(ctx.expr().getStart().getStartIndex(), ctx.expr().getStop().getStopIndex(), ctx.expr().getText(), orderDirection, (ExpressionSegment) expr); } } } @Override public ASTNode visitInsert(final InsertContext ctx) { MySQLInsertStatement result; if (null != ctx.insertValuesClause()) { result = (MySQLInsertStatement) visit(ctx.insertValuesClause()); } else if (null != ctx.insertSelectClause()) { result = (MySQLInsertStatement) visit(ctx.insertSelectClause()); } else { result = new MySQLInsertStatement(); result.setSetAssignment((SetAssignmentSegment) visit(ctx.setAssignmentsClause())); } if (null != ctx.onDuplicateKeyClause()) { result.setOnDuplicateKeyColumns((OnDuplicateKeyColumnsSegment) visit(ctx.onDuplicateKeyClause())); } result.setTable((SimpleTableSegment) visit(ctx.tableName())); result.setParameterCount(currentParameterIndex); return result; } @Override public ASTNode visitInsertSelectClause(final InsertSelectClauseContext ctx) { MySQLInsertStatement result = new MySQLInsertStatement(); if (null != ctx.LP_()) { if (null != ctx.fields()) { result.setInsertColumns(new InsertColumnsSegment(ctx.LP_().getSymbol().getStartIndex(), ctx.RP_().getSymbol().getStopIndex(), createInsertColumns(ctx.fields()))); } else { result.setInsertColumns(new InsertColumnsSegment(ctx.LP_().getSymbol().getStartIndex(), ctx.RP_().getSymbol().getStopIndex(), Collections.emptyList())); } } else { result.setInsertColumns(new InsertColumnsSegment(ctx.start.getStartIndex() - 1, ctx.start.getStartIndex() - 1, Collections.emptyList())); } result.setInsertSelect(createInsertSelectSegment(ctx)); return result; } private SubquerySegment createInsertSelectSegment(final InsertSelectClauseContext ctx) { MySQLSelectStatement selectStatement = (MySQLSelectStatement) visit(ctx.select()); return new SubquerySegment(ctx.select().start.getStartIndex(), ctx.select().stop.getStopIndex(), selectStatement); } @Override public ASTNode visitInsertValuesClause(final InsertValuesClauseContext ctx) { MySQLInsertStatement result = new MySQLInsertStatement(); if (null != ctx.LP_()) { if (null != ctx.fields()) { result.setInsertColumns(new InsertColumnsSegment(ctx.LP_().getSymbol().getStartIndex(), ctx.RP_().getSymbol().getStopIndex(), createInsertColumns(ctx.fields()))); } else { result.setInsertColumns(new InsertColumnsSegment(ctx.LP_().getSymbol().getStartIndex(), ctx.RP_().getSymbol().getStopIndex(), Collections.emptyList())); } } else { result.setInsertColumns(new InsertColumnsSegment(ctx.start.getStartIndex() - 1, ctx.start.getStartIndex() - 1, Collections.emptyList())); } result.getValues().addAll(createInsertValuesSegments(ctx.assignmentValues())); return result; } private Collection<InsertValuesSegment> createInsertValuesSegments(final Collection<MySQLStatementParser.AssignmentValuesContext> assignmentValuesContexts) { Collection<InsertValuesSegment> result = new LinkedList<>(); for (MySQLStatementParser.AssignmentValuesContext each : assignmentValuesContexts) { 
result.add((InsertValuesSegment) visit(each)); } return result; } @Override public ASTNode visitOnDuplicateKeyClause(final OnDuplicateKeyClauseContext ctx) { Collection<AssignmentSegment> columns = new LinkedList<>(); for (MySQLStatementParser.AssignmentContext each : ctx.assignment()) { columns.add((AssignmentSegment) visit(each)); } return new OnDuplicateKeyColumnsSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), columns); } @Override public ASTNode visitReplace(final ReplaceContext ctx) { MySQLInsertStatement result; if (null != ctx.replaceValuesClause()) { result = (MySQLInsertStatement) visit(ctx.replaceValuesClause()); } else if (null != ctx.replaceSelectClause()) { result = (MySQLInsertStatement) visit(ctx.replaceSelectClause()); } else { result = new MySQLInsertStatement(); result.setSetAssignment((SetAssignmentSegment) visit(ctx.setAssignmentsClause())); } result.setTable((SimpleTableSegment) visit(ctx.tableName())); result.setParameterCount(currentParameterIndex); return result; } @Override public ASTNode visitReplaceSelectClause(final ReplaceSelectClauseContext ctx) { MySQLInsertStatement result = new MySQLInsertStatement(); if (null != ctx.LP_()) { if (null != ctx.fields()) { result.setInsertColumns(new InsertColumnsSegment(ctx.LP_().getSymbol().getStartIndex(), ctx.RP_().getSymbol().getStopIndex(), createInsertColumns(ctx.fields()))); } else { result.setInsertColumns(new InsertColumnsSegment(ctx.LP_().getSymbol().getStartIndex(), ctx.RP_().getSymbol().getStopIndex(), Collections.emptyList())); } } else { result.setInsertColumns(new InsertColumnsSegment(ctx.start.getStartIndex() - 1, ctx.start.getStartIndex() - 1, Collections.emptyList())); } result.setInsertSelect(createReplaceSelectSegment(ctx)); return result; } private SubquerySegment createReplaceSelectSegment(final ReplaceSelectClauseContext ctx) { MySQLSelectStatement selectStatement = (MySQLSelectStatement) visit(ctx.select()); return new SubquerySegment(ctx.select().start.getStartIndex(), ctx.select().stop.getStopIndex(), selectStatement); } @Override public ASTNode visitReplaceValuesClause(final ReplaceValuesClauseContext ctx) { MySQLInsertStatement result = new MySQLInsertStatement(); if (null != ctx.LP_()) { if (null != ctx.fields()) { result.setInsertColumns(new InsertColumnsSegment(ctx.LP_().getSymbol().getStartIndex(), ctx.RP_().getSymbol().getStopIndex(), createInsertColumns(ctx.fields()))); } else { result.setInsertColumns(new InsertColumnsSegment(ctx.LP_().getSymbol().getStartIndex(), ctx.RP_().getSymbol().getStopIndex(), Collections.emptyList())); } } else { result.setInsertColumns(new InsertColumnsSegment(ctx.start.getStartIndex() - 1, ctx.start.getStartIndex() - 1, Collections.emptyList())); } result.getValues().addAll(createReplaceValuesSegments(ctx.assignmentValues())); return result; } private List<ColumnSegment> createInsertColumns(final FieldsContext fields) { List<ColumnSegment> result = new LinkedList<>(); for (InsertIdentifierContext each : fields.insertIdentifier()) { result.add((ColumnSegment) visit(each)); } return result; } private Collection<InsertValuesSegment> createReplaceValuesSegments(final Collection<MySQLStatementParser.AssignmentValuesContext> assignmentValuesContexts) { Collection<InsertValuesSegment> result = new LinkedList<>(); for (MySQLStatementParser.AssignmentValuesContext each : assignmentValuesContexts) { result.add((InsertValuesSegment) visit(each)); } return result; } @Override public ASTNode visitUpdate(final UpdateContext ctx) { MySQLUpdateStatement 
result = new MySQLUpdateStatement(); TableSegment tableSegment = (TableSegment) visit(ctx.tableReferences()); result.setTableSegment(tableSegment); result.setSetAssignment((SetAssignmentSegment) visit(ctx.setAssignmentsClause())); if (null != ctx.whereClause()) { result.setWhere((WhereSegment) visit(ctx.whereClause())); } if (null != ctx.orderByClause()) { result.setOrderBy((OrderBySegment) visit(ctx.orderByClause())); } if (null != ctx.limitClause()) { result.setLimit((LimitSegment) visit(ctx.limitClause())); } result.setParameterCount(currentParameterIndex); return result; } @Override public ASTNode visitSetAssignmentsClause(final SetAssignmentsClauseContext ctx) { Collection<AssignmentSegment> assignments = new LinkedList<>(); for (MySQLStatementParser.AssignmentContext each : ctx.assignment()) { assignments.add((AssignmentSegment) visit(each)); } return new SetAssignmentSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), assignments); } @Override public ASTNode visitAssignmentValues(final AssignmentValuesContext ctx) { List<ExpressionSegment> segments = new LinkedList<>(); for (MySQLStatementParser.AssignmentValueContext each : ctx.assignmentValue()) { segments.add((ExpressionSegment) visit(each)); } return new InsertValuesSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), segments); } @Override public ASTNode visitAssignment(final AssignmentContext ctx) { ColumnSegment column = (ColumnSegment) visit(ctx.columnRef()); ExpressionSegment value = (ExpressionSegment) visit(ctx.assignmentValue()); return new AssignmentSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), column, value); } @Override public ASTNode visitAssignmentValue(final AssignmentValueContext ctx) { ExprContext expr = ctx.expr(); if (null != expr) { ASTNode result = visit(expr); if (result instanceof ColumnSegment) { return new CommonExpressionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), ctx.getText()); } else { return result; } } return new CommonExpressionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), ctx.getText()); } @Override public ASTNode visitBlobValue(final BlobValueContext ctx) { return new StringLiteralValue(ctx.string_().getText()); } @Override public ASTNode visitDelete(final DeleteContext ctx) { MySQLDeleteStatement result = new MySQLDeleteStatement(); if (null != ctx.multipleTablesClause()) { result.setTableSegment((TableSegment) visit(ctx.multipleTablesClause())); } else { result.setTableSegment((TableSegment) visit(ctx.singleTableClause())); } if (null != ctx.whereClause()) { result.setWhere((WhereSegment) visit(ctx.whereClause())); } if (null != ctx.orderByClause()) { result.setOrderBy((OrderBySegment) visit(ctx.orderByClause())); } if (null != ctx.limitClause()) { result.setLimit((LimitSegment) visit(ctx.limitClause())); } result.setParameterCount(currentParameterIndex); return result; } @Override public ASTNode visitSingleTableClause(final SingleTableClauseContext ctx) { SimpleTableSegment result = (SimpleTableSegment) visit(ctx.tableName()); if (null != ctx.alias()) { result.setAlias((AliasSegment) visit(ctx.alias())); } return result; } @Override public ASTNode visitMultipleTablesClause(final MultipleTablesClauseContext ctx) { DeleteMultiTableSegment result = new DeleteMultiTableSegment(); TableSegment relateTableSource = (TableSegment) visit(ctx.tableReferences()); result.setRelationTable(relateTableSource); result.setActualDeleteTables(generateTablesFromTableAliasRefList(ctx.tableAliasRefList())); 
return result; } private List<SimpleTableSegment> generateTablesFromTableAliasRefList(final TableAliasRefListContext ctx) { List<SimpleTableSegment> result = new LinkedList<>(); for (MySQLStatementParser.TableIdentOptWildContext each : ctx.tableIdentOptWild()) { result.add((SimpleTableSegment) visit(each.tableName())); } return result; } @Override public ASTNode visitSelect(final SelectContext ctx) { MySQLSelectStatement result; if (null != ctx.queryExpression()) { result = (MySQLSelectStatement) visit(ctx.queryExpression()); if (null != ctx.lockClauseList()) { result.setLock((LockSegment) visit(ctx.lockClauseList())); } } else if (null != ctx.selectWithInto()) { result = (MySQLSelectStatement) visit(ctx.selectWithInto()); } else { result = (MySQLSelectStatement) visit(ctx.getChild(0)); } result.setParameterCount(currentParameterIndex); return result; } private boolean isDistinct(final QuerySpecificationContext ctx) { for (MySQLStatementParser.SelectSpecificationContext each : ctx.selectSpecification()) { if (((BooleanLiteralValue) visit(each)).getValue()) { return true; } } return false; } @Override public ASTNode visitSelectSpecification(final SelectSpecificationContext ctx) { if (null != ctx.duplicateSpecification()) { return visit(ctx.duplicateSpecification()); } return new BooleanLiteralValue(false); } @Override public ASTNode visitDuplicateSpecification(final DuplicateSpecificationContext ctx) { String text = ctx.getText(); if ("DISTINCT".equalsIgnoreCase(text) || "DISTINCTROW".equalsIgnoreCase(text)) { return new BooleanLiteralValue(true); } return new BooleanLiteralValue(false); } @Override public ASTNode visitProjections(final ProjectionsContext ctx) { Collection<ProjectionSegment> projections = new LinkedList<>(); if (null != ctx.unqualifiedShorthand()) { projections.add(new ShorthandProjectionSegment(ctx.unqualifiedShorthand().getStart().getStartIndex(), ctx.unqualifiedShorthand().getStop().getStopIndex())); } for (MySQLStatementParser.ProjectionContext each : ctx.projection()) { projections.add((ProjectionSegment) visit(each)); } ProjectionsSegment result = new ProjectionsSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex()); result.getProjections().addAll(projections); return result; } @Override public ASTNode visitProjection(final ProjectionContext ctx) { if (null != ctx.qualifiedShorthand()) { QualifiedShorthandContext shorthand = ctx.qualifiedShorthand(); ShorthandProjectionSegment result = new ShorthandProjectionSegment(shorthand.getStart().getStartIndex(), shorthand.getStop().getStopIndex()); IdentifierValue identifier = new IdentifierValue(shorthand.identifier().getText()); result.setOwner(new OwnerSegment(shorthand.identifier().getStart().getStartIndex(), shorthand.identifier().getStop().getStopIndex(), identifier)); return result; } AliasSegment alias = null == ctx.alias() ? 
null : (AliasSegment) visit(ctx.alias()); ASTNode exprProjection = visit(ctx.expr()); if (exprProjection instanceof ColumnSegment) { ColumnProjectionSegment result = new ColumnProjectionSegment((ColumnSegment) exprProjection); result.setAlias(alias); return result; } if (exprProjection instanceof SubquerySegment) { SubquerySegment subquerySegment = (SubquerySegment) exprProjection; String text = ctx.start.getInputStream().getText(new Interval(subquerySegment.getStartIndex(), subquerySegment.getStopIndex())); SubqueryProjectionSegment result = new SubqueryProjectionSegment((SubquerySegment) exprProjection, text); result.setAlias(alias); return result; } if (exprProjection instanceof ExistsSubqueryExpression) { ExistsSubqueryExpression existsSubqueryExpression = (ExistsSubqueryExpression) exprProjection; String text = ctx.start.getInputStream().getText(new Interval(existsSubqueryExpression.getStartIndex(), existsSubqueryExpression.getStopIndex())); SubqueryProjectionSegment result = new SubqueryProjectionSegment(((ExistsSubqueryExpression) exprProjection).getSubquery(), text); result.setAlias(alias); return result; } return createProjection(ctx, alias, exprProjection); } @Override public ASTNode visitAlias(final AliasContext ctx) { return new AliasSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), new IdentifierValue(ctx.textOrIdentifier().getText())); } @Override public ASTNode visitFromClause(final FromClauseContext ctx) { return visit(ctx.tableReferences()); } @Override public ASTNode visitTableReferences(final TableReferencesContext ctx) { TableSegment result = (TableSegment) visit(ctx.tableReference(0)); if (ctx.tableReference().size() > 1) { for (int i = 1; i < ctx.tableReference().size(); i++) { result = generateJoinTableSourceFromEscapedTableReference(ctx.tableReference(i), result); } } return result; } private JoinTableSegment generateJoinTableSourceFromEscapedTableReference(final TableReferenceContext ctx, final TableSegment tableSegment) { JoinTableSegment result = new JoinTableSegment(); result.setStartIndex(tableSegment.getStartIndex()); result.setStopIndex(ctx.stop.getStopIndex()); result.setLeft(tableSegment); result.setRight((TableSegment) visit(ctx)); return result; } @Override public ASTNode visitEscapedTableReference(final EscapedTableReferenceContext ctx) { TableSegment result; TableSegment left; left = (TableSegment) visit(ctx.tableFactor()); for (MySQLStatementParser.JoinedTableContext each : ctx.joinedTable()) { left = visitJoinedTable(each, left); } result = left; return result; } @Override public ASTNode visitTableReference(final TableReferenceContext ctx) { TableSegment result; TableSegment left; left = null != ctx.tableFactor() ? 
(TableSegment) visit(ctx.tableFactor()) : (TableSegment) visit(ctx.escapedTableReference()); for (MySQLStatementParser.JoinedTableContext each : ctx.joinedTable()) { left = visitJoinedTable(each, left); } result = left; return result; } @Override public ASTNode visitTableFactor(final TableFactorContext ctx) { if (null != ctx.subquery()) { MySQLSelectStatement subquery = (MySQLSelectStatement) visit(ctx.subquery()); SubquerySegment subquerySegment = new SubquerySegment(ctx.subquery().start.getStartIndex(), ctx.subquery().stop.getStopIndex(), subquery); SubqueryTableSegment result = new SubqueryTableSegment(subquerySegment); if (null != ctx.alias()) { result.setAlias((AliasSegment) visit(ctx.alias())); } return result; } if (null != ctx.tableName()) { SimpleTableSegment result = (SimpleTableSegment) visit(ctx.tableName()); if (null != ctx.alias()) { result.setAlias((AliasSegment) visit(ctx.alias())); } return result; } return visit(ctx.tableReferences()); } private JoinTableSegment visitJoinedTable(final JoinedTableContext ctx, final TableSegment tableSegment) { JoinTableSegment result = new JoinTableSegment(); result.setLeft(tableSegment); result.setStartIndex(tableSegment.getStartIndex()); result.setStopIndex(ctx.stop.getStopIndex()); result.setJoinType(getJoinType(ctx)); TableSegment right = null != ctx.tableFactor() ? (TableSegment) visit(ctx.tableFactor()) : (TableSegment) visit(ctx.tableReference()); result.setRight(right); if (null != ctx.joinSpecification()) { result = visitJoinSpecification(ctx.joinSpecification(), result); } return result; } private String getJoinType(final JoinedTableContext ctx) { String joinType = null; if (null != ctx.innerJoinType()) { joinType = ctx.innerJoinType().JOIN() != null ? JoinType.MYSQL_INNER_JOIN.getJoinType() : JoinType.MYSQL_STRAIGHT_JOIN.getJoinType(); } else if (null != ctx.outerJoinType()) { joinType = ctx.outerJoinType().LEFT() != null ? 
JoinType.MYSQL_LEFT_JOIN.getJoinType() : JoinType.MYSQL_RIGHT_JOIN.getJoinType(); } else if (null != ctx.naturalJoinType()) { if (null != ctx.naturalJoinType().LEFT()) { joinType = JoinType.MYSQL_NATURAL_LEFT_JOIN.getJoinType(); } else if (null != ctx.naturalJoinType().RIGHT()) { joinType = JoinType.MYSQL_NATURAL_RIGHT_JOIN.getJoinType(); } else { joinType = JoinType.MYSQL_NATURAL_INNER_JOIN.getJoinType(); } } return joinType; } private JoinTableSegment visitJoinSpecification(final JoinSpecificationContext ctx, final JoinTableSegment joinTableSource) { if (null != ctx.expr()) { ExpressionSegment condition = (ExpressionSegment) visit(ctx.expr()); joinTableSource.setCondition(condition); } if (null != ctx.USING()) { List<ColumnSegment> columnSegmentList = new LinkedList<>(); for (MySQLStatementParser.ColumnNameContext cname : ctx.columnNames().columnName()) { columnSegmentList.add((ColumnSegment) visit(cname)); } joinTableSource.setUsing(columnSegmentList); } return joinTableSource; } @Override public ASTNode visitWhereClause(final WhereClauseContext ctx) { ASTNode segment = visit(ctx.expr()); return new WhereSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), (ExpressionSegment) segment); } @Override public ASTNode visitGroupByClause(final GroupByClauseContext ctx) { Collection<OrderByItemSegment> items = new LinkedList<>(); for (OrderByItemContext each : ctx.orderByItem()) { items.add((OrderByItemSegment) visit(each)); } return new GroupBySegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), items); } @Override public ASTNode visitLimitClause(final LimitClauseContext ctx) { if (null == ctx.limitOffset()) { return new LimitSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), null, (PaginationValueSegment) visit(ctx.limitRowCount())); } PaginationValueSegment rowCount; PaginationValueSegment offset; if (null != ctx.OFFSET()) { rowCount = (PaginationValueSegment) visit(ctx.limitRowCount()); offset = (PaginationValueSegment) visit(ctx.limitOffset()); } else { offset = (PaginationValueSegment) visit(ctx.limitOffset()); rowCount = (PaginationValueSegment) visit(ctx.limitRowCount()); } return new LimitSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), offset, rowCount); } @Override public ASTNode visitLimitRowCount(final LimitRowCountContext ctx) { if (null != ctx.numberLiterals()) { return new NumberLiteralLimitValueSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), ((NumberLiteralValue) visit(ctx.numberLiterals())).getValue().longValue()); } return new ParameterMarkerLimitValueSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), ((ParameterMarkerValue) visit(ctx.parameterMarker())).getValue()); } @Override public final ASTNode visitConstraintName(final ConstraintNameContext ctx) { return new ConstraintSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), (IdentifierValue) visit(ctx.identifier())); } @Override public ASTNode visitLimitOffset(final LimitOffsetContext ctx) { if (null != ctx.numberLiterals()) { return new NumberLiteralLimitValueSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), ((NumberLiteralValue) visit(ctx.numberLiterals())).getValue().longValue()); } return new ParameterMarkerLimitValueSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), ((ParameterMarkerValue) visit(ctx.parameterMarker())).getValue()); } }
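For orientation amid this wall of flattened visitor code, a minimal driver sketch follows showing how a class like the one above is typically exercised end to end. It is an illustration, not part of the dumped row: the lexer name MySQLStatementLexer, the sample SQL, and the driver class itself are assumptions; ShardingSphere imports are elided; only the ANTLR runtime calls (CharStreams.fromString, CommonTokenStream) and members visible in the dump (visitInsert, the Properties constructor, getParameterCount) are taken as given.

import java.util.Properties;
import org.antlr.v4.runtime.CharStreams;
import org.antlr.v4.runtime.CommonTokenStream;
// ShardingSphere imports (lexer, parser, visitor, statement classes) elided.

public final class VisitorDriverSketch {

    public static void main(final String[] args) {
        // MySQLStatementLexer is an assumed name generated from the MySQLStatement grammar;
        // MySQLStatementParser is referenced throughout the visitor above.
        MySQLStatementLexer lexer = new MySQLStatementLexer(CharStreams.fromString("INSERT INTO t_order (order_id) VALUES (?)"));
        MySQLStatementParser parser = new MySQLStatementParser(new CommonTokenStream(lexer));
        // Anonymous subclass in case the visitor is declared abstract upstream; the
        // Properties argument is ignored by the constructor shown in the dump.
        MySQLStatementSQLVisitor visitor = new MySQLStatementSQLVisitor(new Properties()) { };
        // Invoking the insert rule directly dispatches into visitInsert(...) above; the
        // single '?' placeholder bumps currentParameterIndex once via visitParameterMarker.
        MySQLInsertStatement insert = (MySQLInsertStatement) visitor.visit(parser.insert());
        System.out.println(insert.getParameterCount()); // expected: 1
    }
}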
class MySQLStatementSQLVisitor extends MySQLStatementBaseVisitor<ASTNode> { private int currentParameterIndex; public MySQLStatementSQLVisitor(final Properties props) { } @Override public final ASTNode visitParameterMarker(final ParameterMarkerContext ctx) { return new ParameterMarkerValue(currentParameterIndex++); } @Override public final ASTNode visitLiterals(final LiteralsContext ctx) { if (null != ctx.stringLiterals()) { return visit(ctx.stringLiterals()); } if (null != ctx.numberLiterals()) { return visit(ctx.numberLiterals()); } if (null != ctx.temporalLiterals()) { return visit(ctx.temporalLiterals()); } if (null != ctx.hexadecimalLiterals()) { return visit(ctx.hexadecimalLiterals()); } if (null != ctx.bitValueLiterals()) { return visit(ctx.bitValueLiterals()); } if (null != ctx.booleanLiterals()) { return visit(ctx.booleanLiterals()); } if (null != ctx.nullValueLiterals()) { return visit(ctx.nullValueLiterals()); } throw new IllegalStateException("Literals must have string, number, dateTime, hex, bit, boolean or null."); } @Override public final ASTNode visitStringLiterals(final StringLiteralsContext ctx) { return new StringLiteralValue(ctx.getText()); } @Override public final ASTNode visitNumberLiterals(final NumberLiteralsContext ctx) { return new NumberLiteralValue(ctx.getText()); } @Override public ASTNode visitTemporalLiterals(final TemporalLiteralsContext ctx) { return new OtherLiteralValue(ctx.getText()); } @Override public final ASTNode visitHexadecimalLiterals(final HexadecimalLiteralsContext ctx) { return new OtherLiteralValue(ctx.getText()); } @Override public final ASTNode visitBitValueLiterals(final BitValueLiteralsContext ctx) { return new OtherLiteralValue(ctx.getText()); } @Override public final ASTNode visitBooleanLiterals(final BooleanLiteralsContext ctx) { return new BooleanLiteralValue(ctx.getText()); } @Override public final ASTNode visitNullValueLiterals(final NullValueLiteralsContext ctx) { return new OtherLiteralValue(ctx.getText()); } @Override public final ASTNode visitIdentifier(final IdentifierContext ctx) { return new IdentifierValue(ctx.getText()); } @Override public final ASTNode visitSchemaName(final SchemaNameContext ctx) { return visit(ctx.identifier()); } @Override public final ASTNode visitTableName(final TableNameContext ctx) { SimpleTableSegment result = new SimpleTableSegment(new TableNameSegment(ctx.name().getStart().getStartIndex(), ctx.name().getStop().getStopIndex(), new IdentifierValue(ctx.name().identifier().getText()))); OwnerContext owner = ctx.owner(); if (null != owner) { result.setOwner(new OwnerSegment(owner.getStart().getStartIndex(), owner.getStop().getStopIndex(), (IdentifierValue) visit(owner.identifier()))); } return result; } @Override public final ASTNode visitViewName(final ViewNameContext ctx) { SimpleTableSegment result = new SimpleTableSegment(new TableNameSegment(ctx.identifier().getStart().getStartIndex(), ctx.identifier().getStop().getStopIndex(), new IdentifierValue(ctx.identifier().getText()))); OwnerContext owner = ctx.owner(); if (null != owner) { result.setOwner(new OwnerSegment(owner.getStart().getStartIndex(), owner.getStop().getStopIndex(), (IdentifierValue) visit(owner.identifier()))); } return result; } @Override public final ASTNode visitColumnName(final ColumnNameContext ctx) { return new ColumnSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), (IdentifierValue) visit(ctx.identifier())); } @Override public final ASTNode visitIndexName(final IndexNameContext ctx) { return new 
IndexSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), (IdentifierValue) visit(ctx.identifier())); } @Override public ASTNode visitTableList(final TableListContext ctx) { CollectionValue<SimpleTableSegment> result = new CollectionValue<>(); for (TableNameContext each : ctx.tableName()) { result.getValue().add((SimpleTableSegment) visit(each)); } return result; } @Override public final ASTNode visitViewNames(final ViewNamesContext ctx) { CollectionValue<SimpleTableSegment> result = new CollectionValue<>(); for (ViewNameContext each : ctx.viewName()) { result.getValue().add((SimpleTableSegment) visit(each)); } return result; } @Override public final ASTNode visitColumnNames(final ColumnNamesContext ctx) { CollectionValue<ColumnSegment> result = new CollectionValue<>(); for (ColumnNameContext each : ctx.columnName()) { result.getValue().add((ColumnSegment) visit(each)); } return result; } @Override public final ASTNode visitExpr(final ExprContext ctx) { if (null != ctx.booleanPrimary()) { return visit(ctx.booleanPrimary()); } if (null != ctx.XOR()) { ExpressionSegment left = (ExpressionSegment) visit(ctx.expr(0)); ExpressionSegment right = (ExpressionSegment) visit(ctx.expr(1)); String operator = "XOR"; String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex())); return new BinaryOperationExpression(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), left, right, operator, text); } if (null != ctx.logicalOperator()) { String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex())); return new BinaryOperationExpression(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), (ExpressionSegment) visit(ctx.expr(0)), (ExpressionSegment) visit(ctx.expr(1)), ctx.logicalOperator().getText(), text); } return new NotExpression(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), (ExpressionSegment) visit(ctx.expr(0))); } @Override public final ASTNode visitBooleanPrimary(final BooleanPrimaryContext ctx) { if (null != ctx.IS()) { ExpressionSegment left = (ExpressionSegment) visit(ctx.booleanPrimary()); ExpressionSegment right = new LiteralExpressionSegment(ctx.IS().getSymbol().getStopIndex() + 1, ctx.stop.getStopIndex(), ctx.start.getInputStream().getText(new Interval(ctx.IS().getSymbol().getStopIndex() + 1, ctx.stop.getStopIndex()))); String operator = "IS"; String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex())); return new BinaryOperationExpression(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), left, right, operator, text); } if (null != ctx.comparisonOperator() || null != ctx.SAFE_EQ_()) { return createCompareSegment(ctx); } if (null != ctx.assignmentOperator()) { return createAssignmentSegment(ctx); } return visit(ctx.predicate()); } private ASTNode createAssignmentSegment(final BooleanPrimaryContext ctx) { ExpressionSegment left = (ExpressionSegment) visit(ctx.booleanPrimary()); ExpressionSegment right = (ExpressionSegment) visit(ctx.predicate()); String operator = ctx.assignmentOperator().getText(); String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex())); return new BinaryOperationExpression(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), left, right, operator, text); } private ASTNode createCompareSegment(final BooleanPrimaryContext ctx) { ExpressionSegment left = (ExpressionSegment) visit(ctx.booleanPrimary()); ExpressionSegment right; if (null != ctx.predicate()) { right =
(ExpressionSegment) visit(ctx.predicate()); } else { right = new SubqueryExpressionSegment(new SubquerySegment(ctx.subquery().start.getStartIndex(), ctx.subquery().stop.getStopIndex(), (MySQLSelectStatement) visit(ctx.subquery()))); } String operator = null != ctx.SAFE_EQ_() ? ctx.SAFE_EQ_().getText() : ctx.comparisonOperator().getText(); String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex())); return new BinaryOperationExpression(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), left, right, operator, text); } @Override public final ASTNode visitPredicate(final PredicateContext ctx) { if (null != ctx.IN()) { return createInSegment(ctx); } if (null != ctx.BETWEEN()) { return createBetweenSegment(ctx); } if (null != ctx.LIKE()) { return createBinaryOperationExpressionFromLike(ctx); } if (null != ctx.REGEXP()) { return createBinaryOperationExpressionFromRegexp(ctx); } return visit(ctx.bitExpr(0)); } private InExpression createInSegment(final PredicateContext ctx) { boolean not = null != ctx.NOT(); ExpressionSegment left = (ExpressionSegment) visit(ctx.bitExpr(0)); ExpressionSegment right; if (null != ctx.subquery()) { right = new SubqueryExpressionSegment(new SubquerySegment(ctx.subquery().start.getStartIndex(), ctx.subquery().stop.getStopIndex(), (MySQLSelectStatement) visit(ctx.subquery()))); } else { right = new ListExpression(ctx.LP_().getSymbol().getStartIndex(), ctx.RP_().getSymbol().getStopIndex()); for (ExprContext each : ctx.expr()) { ((ListExpression) right).getItems().add((ExpressionSegment) visit(each)); } } return new InExpression(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), left, right, not); } private BinaryOperationExpression createBinaryOperationExpressionFromLike(final PredicateContext ctx) { ExpressionSegment left = (ExpressionSegment) visit(ctx.bitExpr(0)); String operator; ExpressionSegment right; if (null != ctx.SOUNDS()) { right = (ExpressionSegment) visit(ctx.bitExpr(1)); operator = "SOUNDS LIKE"; } else { ListExpression listExpression = new ListExpression(ctx.simpleExpr(0).start.getStartIndex(), ctx.simpleExpr().get(ctx.simpleExpr().size() - 1).stop.getStopIndex()); for (SimpleExprContext each : ctx.simpleExpr()) { listExpression.getItems().add((ExpressionSegment) visit(each)); } right = listExpression; operator = null != ctx.NOT() ? "NOT LIKE" : "LIKE"; } String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex())); return new BinaryOperationExpression(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), left, right, operator, text); } private BinaryOperationExpression createBinaryOperationExpressionFromRegexp(final PredicateContext ctx) { ExpressionSegment left = (ExpressionSegment) visit(ctx.bitExpr(0)); ExpressionSegment right = (ExpressionSegment) visit(ctx.bitExpr(1)); String operator = null != ctx.NOT() ? 
"NOT REGEXP" : "REGEXP"; String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex())); return new BinaryOperationExpression(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), left, right, operator, text); } private BetweenExpression createBetweenSegment(final PredicateContext ctx) { ExpressionSegment left = (ExpressionSegment) visit(ctx.bitExpr(0)); ExpressionSegment between = (ExpressionSegment) visit(ctx.bitExpr(1)); ExpressionSegment and = (ExpressionSegment) visit(ctx.predicate()); boolean not = null != ctx.NOT(); return new BetweenExpression(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), left, between, and, not); } @Override public final ASTNode visitBitExpr(final BitExprContext ctx) { if (null != ctx.simpleExpr()) { return visit(ctx.simpleExpr()); } ExpressionSegment left = (ExpressionSegment) visit(ctx.getChild(0)); ExpressionSegment right = (ExpressionSegment) visit(ctx.getChild(2)); String operator = ctx.getChild(1).getText(); String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex())); return new BinaryOperationExpression(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), left, right, operator, text); } @Override public final ASTNode visitSimpleExpr(final SimpleExprContext ctx) { int startIndex = ctx.start.getStartIndex(); int stopIndex = ctx.stop.getStopIndex(); if (null != ctx.subquery()) { SubquerySegment subquerySegment = new SubquerySegment(ctx.subquery().getStart().getStartIndex(), ctx.subquery().getStop().getStopIndex(), (MySQLSelectStatement) visit(ctx.subquery())); if (null != ctx.EXISTS()) { return new ExistsSubqueryExpression(startIndex, stopIndex, subquerySegment); } return new SubqueryExpressionSegment(subquerySegment); } if (null != ctx.parameterMarker()) { return new ParameterMarkerExpressionSegment(startIndex, stopIndex, ((ParameterMarkerValue) visit(ctx.parameterMarker())).getValue()); } if (null != ctx.literals()) { return SQLUtil.createLiteralExpression(visit(ctx.literals()), startIndex, stopIndex, ctx.literals().start.getInputStream().getText(new Interval(startIndex, stopIndex))); } if (null != ctx.intervalExpression()) { return visit(ctx.intervalExpression()); } if (null != ctx.functionCall()) { return visit(ctx.functionCall()); } if (null != ctx.columnRef()) { return visit(ctx.columnRef()); } if (null != ctx.matchExpression()) { return visit(ctx.matchExpression()); } if (null != ctx.notOperator()) { ASTNode expression = visit(ctx.simpleExpr(0)); if (expression instanceof ExistsSubqueryExpression) { ((ExistsSubqueryExpression) expression).setNot(true); return expression; } return new NotExpression(startIndex, stopIndex, (ExpressionSegment) expression); } if (null != ctx.LP_() && 1 == ctx.expr().size()) { return visit(ctx.expr(0)); } return visitRemainSimpleExpr(ctx); } @Override public ASTNode visitColumnRef(final ColumnRefContext ctx) { int identifierCount = ctx.identifier().size(); ColumnSegment result; if (1 == identifierCount) { result = new ColumnSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), (IdentifierValue) visit(ctx.identifier(0))); } else if (2 == identifierCount) { result = new ColumnSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), (IdentifierValue) visit(ctx.identifier(1))); result.setOwner(new OwnerSegment(ctx.identifier(0).start.getStartIndex(), ctx.identifier(0).stop.getStopIndex(), (IdentifierValue) visit(ctx.identifier(0)))); } else { result = new 
ColumnSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), (IdentifierValue) visit(ctx.identifier(2))); result.setOwner(new OwnerSegment(ctx.identifier(1).start.getStartIndex(), ctx.identifier(1).stop.getStopIndex(), (IdentifierValue) visit(ctx.identifier(1)))); } return result; } @Override public ASTNode visitSubquery(final SubqueryContext ctx) { return visit(ctx.queryExpressionParens()); } @Override public ASTNode visitQueryExpressionParens(final QueryExpressionParensContext ctx) { if (null != ctx.queryExpressionParens()) { return visit(ctx.queryExpressionParens()); } MySQLSelectStatement result = (MySQLSelectStatement) visit(ctx.queryExpression()); if (null != ctx.lockClauseList()) { result.setLock((LockSegment) visit(ctx.lockClauseList())); } result.setParameterCount(currentParameterIndex); return result; } @Override public ASTNode visitLockClauseList(final LockClauseListContext ctx) { LockSegment result = new LockSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex()); for (MySQLStatementParser.LockClauseContext each : ctx.lockClause()) { if (null != each.tableLockingList()) { result.getTables().addAll(generateTablesFromTableAliasRefList(each.tableLockingList().tableAliasRefList())); } } return result; } @Override public ASTNode visitQueryExpression(final QueryExpressionContext ctx) { MySQLSelectStatement result; if (null != ctx.queryExpressionBody()) { result = (MySQLSelectStatement) visit(ctx.queryExpressionBody()); } else { result = (MySQLSelectStatement) visit(ctx.queryExpressionParens()); } if (null != ctx.orderByClause()) { result.setOrderBy((OrderBySegment) visit(ctx.orderByClause())); } if (null != ctx.limitClause()) { result.setLimit((LimitSegment) visit(ctx.limitClause())); } return result; } @Override public ASTNode visitSelectWithInto(final SelectWithIntoContext ctx) { if (null != ctx.selectWithInto()) { return visit(ctx.selectWithInto()); } MySQLSelectStatement result = (MySQLSelectStatement) visit(ctx.queryExpression()); if (null != ctx.lockClauseList()) { result.setLock((LockSegment) visit(ctx.lockClauseList())); } return result; } @Override public ASTNode visitQueryExpressionBody(final QueryExpressionBodyContext ctx) { if (1 == ctx.getChildCount() && ctx.getChild(0) instanceof QueryPrimaryContext) { return visit(ctx.queryPrimary()); } throw new IllegalStateException("union select is not supported yet."); } @Override public ASTNode visitQuerySpecification(final QuerySpecificationContext ctx) { MySQLSelectStatement result = new MySQLSelectStatement(); result.setProjections((ProjectionsSegment) visit(ctx.projections())); if (null != ctx.selectSpecification()) { result.getProjections().setDistinctRow(isDistinct(ctx)); } if (null != ctx.fromClause() && null != ctx.fromClause().tableReferences()) { TableSegment tableSource = (TableSegment) visit(ctx.fromClause().tableReferences()); result.setFrom(tableSource); } if (null != ctx.whereClause()) { result.setWhere((WhereSegment) visit(ctx.whereClause())); } if (null != ctx.groupByClause()) { result.setGroupBy((GroupBySegment) visit(ctx.groupByClause())); } if (null != ctx.havingClause()) { result.setHaving((HavingSegment) visit(ctx.havingClause())); } if (null != ctx.windowClause()) { result.setWindow((WindowSegment) visit(ctx.windowClause())); } return result; } @Override public ASTNode visitWindowClause(final WindowClauseContext ctx) { return new WindowSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex()); } @Override public ASTNode visitHavingClause(final HavingClauseContext ctx) 
{ ExpressionSegment expr = (ExpressionSegment) visit(ctx.expr()); return new HavingSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), expr); } @Override public final ASTNode visitIntervalExpression(final IntervalExpressionContext ctx) { calculateParameterCount(Collections.singleton(ctx.intervalValue().expr())); return new ExpressionProjectionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), ctx.getText()); } @Override public final ASTNode visitFunctionCall(final FunctionCallContext ctx) { if (null != ctx.aggregationFunction()) { return visit(ctx.aggregationFunction()); } if (null != ctx.specialFunction()) { return visit(ctx.specialFunction()); } if (null != ctx.regularFunction()) { return visit(ctx.regularFunction()); } throw new IllegalStateException("FunctionCallContext must have aggregationFunction, regularFunction or specialFunction."); } @Override public final ASTNode visitAggregationFunction(final AggregationFunctionContext ctx) { String aggregationType = ctx.aggregationFunctionName().getText(); String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex())); return AggregationType.isAggregationType(aggregationType) ? createAggregationSegment(ctx, aggregationType) : new ExpressionProjectionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), text); } private ASTNode createAggregationSegment(final AggregationFunctionContext ctx, final String aggregationType) { AggregationType type = AggregationType.valueOf(aggregationType.toUpperCase()); String innerExpression = ctx.start.getInputStream().getText(new Interval(ctx.LP_().getSymbol().getStartIndex(), ctx.stop.getStopIndex())); if (null == ctx.distinct()) { return new AggregationProjectionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), type, innerExpression); } return new AggregationDistinctProjectionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), type, innerExpression, getDistinctExpression(ctx)); } private String getDistinctExpression(final AggregationFunctionContext ctx) { StringBuilder result = new StringBuilder(); for (int i = 3; i < ctx.getChildCount() - 1; i++) { result.append(ctx.getChild(i).getText()); } return result.toString(); } @Override public final ASTNode visitSpecialFunction(final SpecialFunctionContext ctx) { if (null != ctx.groupConcatFunction()) { return visit(ctx.groupConcatFunction()); } if (null != ctx.windowFunction()) { return visit(ctx.windowFunction()); } if (null != ctx.castFunction()) { return visit(ctx.castFunction()); } if (null != ctx.convertFunction()) { return visit(ctx.convertFunction()); } if (null != ctx.positionFunction()) { return visit(ctx.positionFunction()); } if (null != ctx.substringFunction()) { return visit(ctx.substringFunction()); } if (null != ctx.extractFunction()) { return visit(ctx.extractFunction()); } if (null != ctx.charFunction()) { return visit(ctx.charFunction()); } if (null != ctx.weightStringFunction()) { return visit(ctx.weightStringFunction()); } String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex())); return new ExpressionProjectionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), text); } @Override public final ASTNode visitGroupConcatFunction(final GroupConcatFunctionContext ctx) { calculateParameterCount(ctx.expr()); String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex())); return new 
ExpressionProjectionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), text); } @Override public final ASTNode visitWindowFunction(final WindowFunctionContext ctx) { super.visitWindowFunction(ctx); String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex())); return new ExpressionProjectionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), text); } @Override public final ASTNode visitCastFunction(final CastFunctionContext ctx) { calculateParameterCount(Collections.singleton(ctx.expr())); String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex())); return new ExpressionProjectionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), text); } @Override public final ASTNode visitConvertFunction(final ConvertFunctionContext ctx) { calculateParameterCount(Collections.singleton(ctx.expr())); String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex())); return new ExpressionProjectionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), text); } @Override public final ASTNode visitPositionFunction(final PositionFunctionContext ctx) { calculateParameterCount(ctx.expr()); String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex())); return new ExpressionProjectionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), text); } @Override public final ASTNode visitSubstringFunction(final SubstringFunctionContext ctx) { calculateParameterCount(Collections.singleton(ctx.expr())); String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex())); return new ExpressionProjectionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), text); } @Override public final ASTNode visitExtractFunction(final ExtractFunctionContext ctx) { calculateParameterCount(Collections.singleton(ctx.expr())); String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex())); return new ExpressionProjectionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), text); } @Override public final ASTNode visitCharFunction(final CharFunctionContext ctx) { calculateParameterCount(ctx.expr()); String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex())); return new ExpressionProjectionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), text); } @Override public final ASTNode visitWeightStringFunction(final WeightStringFunctionContext ctx) { calculateParameterCount(Collections.singleton(ctx.expr())); String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex())); return new ExpressionProjectionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), text); } @Override public final ASTNode visitRegularFunction(final RegularFunctionContext ctx) { if (null != ctx.completeRegularFunction()) { calculateParameterCount(ctx.completeRegularFunction().expr()); } String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex())); return new ExpressionProjectionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), text); } private ASTNode visitRemainSimpleExpr(final SimpleExprContext ctx) { if (null != ctx.caseExpression()) { 
visit(ctx.caseExpression()); String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex())); return new CommonExpressionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), text); } for (ExprContext each : ctx.expr()) { visit(each); } for (SimpleExprContext each : ctx.simpleExpr()) { visit(each); } String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex())); return new CommonExpressionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), text); } @Override public final ASTNode visitMatchExpression(final MatchExpressionContext ctx) { visit(ctx.expr()); String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex())); return new CommonExpressionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), text); } private void calculateParameterCount(final Collection<ExprContext> exprContexts) { for (ExprContext each : exprContexts) { visit(each); } } @Override public final ASTNode visitDataType(final DataTypeContext ctx) { DataTypeSegment result = new DataTypeSegment(); result.setDataTypeName(ctx.dataTypeName.getText()); result.setStartIndex(ctx.start.getStartIndex()); result.setStopIndex(ctx.stop.getStopIndex()); if (null != ctx.fieldLength()) { DataTypeLengthSegment dataTypeLengthSegment = (DataTypeLengthSegment) visit(ctx.fieldLength()); result.setDataLength(dataTypeLengthSegment); } if (null != ctx.precision()) { DataTypeLengthSegment dataTypeLengthSegment = (DataTypeLengthSegment) visit(ctx.precision()); result.setDataLength(dataTypeLengthSegment); } return result; } @Override public ASTNode visitFieldLength(final FieldLengthContext ctx) { DataTypeLengthSegment result = new DataTypeLengthSegment(); result.setStartIndex(ctx.start.getStartIndex()); result.setStopIndex(ctx.stop.getStartIndex()); result.setPrecision(Integer.parseInt(ctx.length.getText())); return result; } @Override public ASTNode visitPrecision(final PrecisionContext ctx) { DataTypeLengthSegment result = new DataTypeLengthSegment(); result.setStartIndex(ctx.start.getStartIndex()); result.setStopIndex(ctx.stop.getStartIndex()); List<TerminalNode> numbers = ctx.NUMBER_(); result.setPrecision(Integer.parseInt(numbers.get(0).getText())); result.setScale(Integer.parseInt(numbers.get(1).getText())); return result; } @Override public final ASTNode visitOrderByClause(final OrderByClauseContext ctx) { Collection<OrderByItemSegment> items = new LinkedList<>(); for (OrderByItemContext each : ctx.orderByItem()) { items.add((OrderByItemSegment) visit(each)); } return new OrderBySegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), items); } @Override public final ASTNode visitOrderByItem(final OrderByItemContext ctx) { OrderDirection orderDirection; if (null != ctx.direction()) { orderDirection = null != ctx.direction().DESC() ? 
OrderDirection.DESC : OrderDirection.ASC; } else { orderDirection = OrderDirection.ASC; } if (null != ctx.numberLiterals()) { return new IndexOrderByItemSegment(ctx.numberLiterals().getStart().getStartIndex(), ctx.numberLiterals().getStop().getStopIndex(), SQLUtil.getExactlyNumber(ctx.numberLiterals().getText(), 10).intValue(), orderDirection); } else { ASTNode expr = visitExpr(ctx.expr()); if (expr instanceof ColumnSegment) { return new ColumnOrderByItemSegment((ColumnSegment) expr, orderDirection); } else { return new ExpressionOrderByItemSegment(ctx.expr().getStart().getStartIndex(), ctx.expr().getStop().getStopIndex(), ctx.expr().getText(), orderDirection, (ExpressionSegment) expr); } } } @Override public ASTNode visitInsert(final InsertContext ctx) { MySQLInsertStatement result; if (null != ctx.insertValuesClause()) { result = (MySQLInsertStatement) visit(ctx.insertValuesClause()); } else if (null != ctx.insertSelectClause()) { result = (MySQLInsertStatement) visit(ctx.insertSelectClause()); } else { result = new MySQLInsertStatement(); result.setSetAssignment((SetAssignmentSegment) visit(ctx.setAssignmentsClause())); } if (null != ctx.onDuplicateKeyClause()) { result.setOnDuplicateKeyColumns((OnDuplicateKeyColumnsSegment) visit(ctx.onDuplicateKeyClause())); } result.setTable((SimpleTableSegment) visit(ctx.tableName())); result.setParameterCount(currentParameterIndex); return result; } @Override public ASTNode visitInsertSelectClause(final InsertSelectClauseContext ctx) { MySQLInsertStatement result = new MySQLInsertStatement(); if (null != ctx.LP_()) { if (null != ctx.fields()) { result.setInsertColumns(new InsertColumnsSegment(ctx.LP_().getSymbol().getStartIndex(), ctx.RP_().getSymbol().getStopIndex(), createInsertColumns(ctx.fields()))); } else { result.setInsertColumns(new InsertColumnsSegment(ctx.LP_().getSymbol().getStartIndex(), ctx.RP_().getSymbol().getStopIndex(), Collections.emptyList())); } } else { result.setInsertColumns(new InsertColumnsSegment(ctx.start.getStartIndex() - 1, ctx.start.getStartIndex() - 1, Collections.emptyList())); } result.setInsertSelect(createInsertSelectSegment(ctx)); return result; } private SubquerySegment createInsertSelectSegment(final InsertSelectClauseContext ctx) { MySQLSelectStatement selectStatement = (MySQLSelectStatement) visit(ctx.select()); return new SubquerySegment(ctx.select().start.getStartIndex(), ctx.select().stop.getStopIndex(), selectStatement); } @Override public ASTNode visitInsertValuesClause(final InsertValuesClauseContext ctx) { MySQLInsertStatement result = new MySQLInsertStatement(); if (null != ctx.LP_()) { if (null != ctx.fields()) { result.setInsertColumns(new InsertColumnsSegment(ctx.LP_().getSymbol().getStartIndex(), ctx.RP_().getSymbol().getStopIndex(), createInsertColumns(ctx.fields()))); } else { result.setInsertColumns(new InsertColumnsSegment(ctx.LP_().getSymbol().getStartIndex(), ctx.RP_().getSymbol().getStopIndex(), Collections.emptyList())); } } else { result.setInsertColumns(new InsertColumnsSegment(ctx.start.getStartIndex() - 1, ctx.start.getStartIndex() - 1, Collections.emptyList())); } result.getValues().addAll(createInsertValuesSegments(ctx.assignmentValues())); return result; } private Collection<InsertValuesSegment> createInsertValuesSegments(final Collection<MySQLStatementParser.AssignmentValuesContext> assignmentValuesContexts) { Collection<InsertValuesSegment> result = new LinkedList<>(); for (MySQLStatementParser.AssignmentValuesContext each : assignmentValuesContexts) { 
result.add((InsertValuesSegment) visit(each)); } return result; } @Override public ASTNode visitOnDuplicateKeyClause(final OnDuplicateKeyClauseContext ctx) { Collection<AssignmentSegment> columns = new LinkedList<>(); for (MySQLStatementParser.AssignmentContext each : ctx.assignment()) { columns.add((AssignmentSegment) visit(each)); } return new OnDuplicateKeyColumnsSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), columns); } @Override public ASTNode visitReplace(final ReplaceContext ctx) { MySQLInsertStatement result; if (null != ctx.replaceValuesClause()) { result = (MySQLInsertStatement) visit(ctx.replaceValuesClause()); } else if (null != ctx.replaceSelectClause()) { result = (MySQLInsertStatement) visit(ctx.replaceSelectClause()); } else { result = new MySQLInsertStatement(); result.setSetAssignment((SetAssignmentSegment) visit(ctx.setAssignmentsClause())); } result.setTable((SimpleTableSegment) visit(ctx.tableName())); result.setParameterCount(currentParameterIndex); return result; } @Override public ASTNode visitReplaceSelectClause(final ReplaceSelectClauseContext ctx) { MySQLInsertStatement result = new MySQLInsertStatement(); if (null != ctx.LP_()) { if (null != ctx.fields()) { result.setInsertColumns(new InsertColumnsSegment(ctx.LP_().getSymbol().getStartIndex(), ctx.RP_().getSymbol().getStopIndex(), createInsertColumns(ctx.fields()))); } else { result.setInsertColumns(new InsertColumnsSegment(ctx.LP_().getSymbol().getStartIndex(), ctx.RP_().getSymbol().getStopIndex(), Collections.emptyList())); } } else { result.setInsertColumns(new InsertColumnsSegment(ctx.start.getStartIndex() - 1, ctx.start.getStartIndex() - 1, Collections.emptyList())); } result.setInsertSelect(createReplaceSelectSegment(ctx)); return result; } private SubquerySegment createReplaceSelectSegment(final ReplaceSelectClauseContext ctx) { MySQLSelectStatement selectStatement = (MySQLSelectStatement) visit(ctx.select()); return new SubquerySegment(ctx.select().start.getStartIndex(), ctx.select().stop.getStopIndex(), selectStatement); } @Override public ASTNode visitReplaceValuesClause(final ReplaceValuesClauseContext ctx) { MySQLInsertStatement result = new MySQLInsertStatement(); if (null != ctx.LP_()) { if (null != ctx.fields()) { result.setInsertColumns(new InsertColumnsSegment(ctx.LP_().getSymbol().getStartIndex(), ctx.RP_().getSymbol().getStopIndex(), createInsertColumns(ctx.fields()))); } else { result.setInsertColumns(new InsertColumnsSegment(ctx.LP_().getSymbol().getStartIndex(), ctx.RP_().getSymbol().getStopIndex(), Collections.emptyList())); } } else { result.setInsertColumns(new InsertColumnsSegment(ctx.start.getStartIndex() - 1, ctx.start.getStartIndex() - 1, Collections.emptyList())); } result.getValues().addAll(createReplaceValuesSegments(ctx.assignmentValues())); return result; } private List<ColumnSegment> createInsertColumns(final FieldsContext fields) { List<ColumnSegment> result = new LinkedList<>(); for (InsertIdentifierContext each : fields.insertIdentifier()) { result.add((ColumnSegment) visit(each)); } return result; } private Collection<InsertValuesSegment> createReplaceValuesSegments(final Collection<MySQLStatementParser.AssignmentValuesContext> assignmentValuesContexts) { Collection<InsertValuesSegment> result = new LinkedList<>(); for (MySQLStatementParser.AssignmentValuesContext each : assignmentValuesContexts) { result.add((InsertValuesSegment) visit(each)); } return result; } @Override public ASTNode visitUpdate(final UpdateContext ctx) { MySQLUpdateStatement 
result = new MySQLUpdateStatement(); TableSegment tableSegment = (TableSegment) visit(ctx.tableReferences()); result.setTableSegment(tableSegment); result.setSetAssignment((SetAssignmentSegment) visit(ctx.setAssignmentsClause())); if (null != ctx.whereClause()) { result.setWhere((WhereSegment) visit(ctx.whereClause())); } if (null != ctx.orderByClause()) { result.setOrderBy((OrderBySegment) visit(ctx.orderByClause())); } if (null != ctx.limitClause()) { result.setLimit((LimitSegment) visit(ctx.limitClause())); } result.setParameterCount(currentParameterIndex); return result; } @Override public ASTNode visitSetAssignmentsClause(final SetAssignmentsClauseContext ctx) { Collection<AssignmentSegment> assignments = new LinkedList<>(); for (MySQLStatementParser.AssignmentContext each : ctx.assignment()) { assignments.add((AssignmentSegment) visit(each)); } return new SetAssignmentSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), assignments); } @Override public ASTNode visitAssignmentValues(final AssignmentValuesContext ctx) { List<ExpressionSegment> segments = new LinkedList<>(); for (MySQLStatementParser.AssignmentValueContext each : ctx.assignmentValue()) { segments.add((ExpressionSegment) visit(each)); } return new InsertValuesSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), segments); } @Override public ASTNode visitAssignment(final AssignmentContext ctx) { ColumnSegment column = (ColumnSegment) visit(ctx.columnRef()); ExpressionSegment value = (ExpressionSegment) visit(ctx.assignmentValue()); return new AssignmentSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), column, value); } @Override public ASTNode visitAssignmentValue(final AssignmentValueContext ctx) { ExprContext expr = ctx.expr(); if (null != expr) { ASTNode result = visit(expr); if (result instanceof ColumnSegment) { return new CommonExpressionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), ctx.getText()); } else { return result; } } return new CommonExpressionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), ctx.getText()); } @Override public ASTNode visitBlobValue(final BlobValueContext ctx) { return new StringLiteralValue(ctx.string_().getText()); } @Override public ASTNode visitDelete(final DeleteContext ctx) { MySQLDeleteStatement result = new MySQLDeleteStatement(); if (null != ctx.multipleTablesClause()) { result.setTableSegment((TableSegment) visit(ctx.multipleTablesClause())); } else { result.setTableSegment((TableSegment) visit(ctx.singleTableClause())); } if (null != ctx.whereClause()) { result.setWhere((WhereSegment) visit(ctx.whereClause())); } if (null != ctx.orderByClause()) { result.setOrderBy((OrderBySegment) visit(ctx.orderByClause())); } if (null != ctx.limitClause()) { result.setLimit((LimitSegment) visit(ctx.limitClause())); } result.setParameterCount(currentParameterIndex); return result; } @Override public ASTNode visitSingleTableClause(final SingleTableClauseContext ctx) { SimpleTableSegment result = (SimpleTableSegment) visit(ctx.tableName()); if (null != ctx.alias()) { result.setAlias((AliasSegment) visit(ctx.alias())); } return result; } @Override public ASTNode visitMultipleTablesClause(final MultipleTablesClauseContext ctx) { DeleteMultiTableSegment result = new DeleteMultiTableSegment(); TableSegment relateTableSource = (TableSegment) visit(ctx.tableReferences()); result.setRelationTable(relateTableSource); result.setActualDeleteTables(generateTablesFromTableAliasRefList(ctx.tableAliasRefList())); 
return result; } private List<SimpleTableSegment> generateTablesFromTableAliasRefList(final TableAliasRefListContext ctx) { List<SimpleTableSegment> result = new LinkedList<>(); for (MySQLStatementParser.TableIdentOptWildContext each : ctx.tableIdentOptWild()) { result.add((SimpleTableSegment) visit(each.tableName())); } return result; } @Override public ASTNode visitSelect(final SelectContext ctx) { MySQLSelectStatement result; if (null != ctx.queryExpression()) { result = (MySQLSelectStatement) visit(ctx.queryExpression()); if (null != ctx.lockClauseList()) { result.setLock((LockSegment) visit(ctx.lockClauseList())); } } else if (null != ctx.selectWithInto()) { result = (MySQLSelectStatement) visit(ctx.selectWithInto()); } else { result = (MySQLSelectStatement) visit(ctx.getChild(0)); } result.setParameterCount(currentParameterIndex); return result; } private boolean isDistinct(final QuerySpecificationContext ctx) { for (MySQLStatementParser.SelectSpecificationContext each : ctx.selectSpecification()) { if (((BooleanLiteralValue) visit(each)).getValue()) { return true; } } return false; } @Override public ASTNode visitSelectSpecification(final SelectSpecificationContext ctx) { if (null != ctx.duplicateSpecification()) { return visit(ctx.duplicateSpecification()); } return new BooleanLiteralValue(false); } @Override public ASTNode visitDuplicateSpecification(final DuplicateSpecificationContext ctx) { String text = ctx.getText(); if ("DISTINCT".equalsIgnoreCase(text) || "DISTINCTROW".equalsIgnoreCase(text)) { return new BooleanLiteralValue(true); } return new BooleanLiteralValue(false); } @Override public ASTNode visitProjections(final ProjectionsContext ctx) { Collection<ProjectionSegment> projections = new LinkedList<>(); if (null != ctx.unqualifiedShorthand()) { projections.add(new ShorthandProjectionSegment(ctx.unqualifiedShorthand().getStart().getStartIndex(), ctx.unqualifiedShorthand().getStop().getStopIndex())); } for (MySQLStatementParser.ProjectionContext each : ctx.projection()) { projections.add((ProjectionSegment) visit(each)); } ProjectionsSegment result = new ProjectionsSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex()); result.getProjections().addAll(projections); return result; } @Override public ASTNode visitProjection(final ProjectionContext ctx) { if (null != ctx.qualifiedShorthand()) { QualifiedShorthandContext shorthand = ctx.qualifiedShorthand(); ShorthandProjectionSegment result = new ShorthandProjectionSegment(shorthand.getStart().getStartIndex(), shorthand.getStop().getStopIndex()); IdentifierValue identifier = new IdentifierValue(shorthand.identifier().getText()); result.setOwner(new OwnerSegment(shorthand.identifier().getStart().getStartIndex(), shorthand.identifier().getStop().getStopIndex(), identifier)); return result; } AliasSegment alias = null == ctx.alias() ? 
null : (AliasSegment) visit(ctx.alias()); ASTNode exprProjection = visit(ctx.expr()); if (exprProjection instanceof ColumnSegment) { ColumnProjectionSegment result = new ColumnProjectionSegment((ColumnSegment) exprProjection); result.setAlias(alias); return result; } if (exprProjection instanceof SubquerySegment) { SubquerySegment subquerySegment = (SubquerySegment) exprProjection; String text = ctx.start.getInputStream().getText(new Interval(subquerySegment.getStartIndex(), subquerySegment.getStopIndex())); SubqueryProjectionSegment result = new SubqueryProjectionSegment((SubquerySegment) exprProjection, text); result.setAlias(alias); return result; } if (exprProjection instanceof ExistsSubqueryExpression) { ExistsSubqueryExpression existsSubqueryExpression = (ExistsSubqueryExpression) exprProjection; String text = ctx.start.getInputStream().getText(new Interval(existsSubqueryExpression.getStartIndex(), existsSubqueryExpression.getStopIndex())); SubqueryProjectionSegment result = new SubqueryProjectionSegment(((ExistsSubqueryExpression) exprProjection).getSubquery(), text); result.setAlias(alias); return result; } return createProjection(ctx, alias, exprProjection); } @Override public ASTNode visitAlias(final AliasContext ctx) { return new AliasSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), new IdentifierValue(ctx.textOrIdentifier().getText())); } @Override public ASTNode visitFromClause(final FromClauseContext ctx) { return visit(ctx.tableReferences()); } @Override public ASTNode visitTableReferences(final TableReferencesContext ctx) { TableSegment result = (TableSegment) visit(ctx.tableReference(0)); if (ctx.tableReference().size() > 1) { for (int i = 1; i < ctx.tableReference().size(); i++) { result = generateJoinTableSourceFromEscapedTableReference(ctx.tableReference(i), result); } } return result; } private JoinTableSegment generateJoinTableSourceFromEscapedTableReference(final TableReferenceContext ctx, final TableSegment tableSegment) { JoinTableSegment result = new JoinTableSegment(); result.setStartIndex(tableSegment.getStartIndex()); result.setStopIndex(ctx.stop.getStopIndex()); result.setLeft(tableSegment); result.setRight((TableSegment) visit(ctx)); return result; } @Override public ASTNode visitEscapedTableReference(final EscapedTableReferenceContext ctx) { TableSegment result; TableSegment left; left = (TableSegment) visit(ctx.tableFactor()); for (MySQLStatementParser.JoinedTableContext each : ctx.joinedTable()) { left = visitJoinedTable(each, left); } result = left; return result; } @Override public ASTNode visitTableReference(final TableReferenceContext ctx) { TableSegment result; TableSegment left; left = null != ctx.tableFactor() ? 
(TableSegment) visit(ctx.tableFactor()) : (TableSegment) visit(ctx.escapedTableReference()); for (MySQLStatementParser.JoinedTableContext each : ctx.joinedTable()) { left = visitJoinedTable(each, left); } result = left; return result; } @Override public ASTNode visitTableFactor(final TableFactorContext ctx) { if (null != ctx.subquery()) { MySQLSelectStatement subquery = (MySQLSelectStatement) visit(ctx.subquery()); SubquerySegment subquerySegment = new SubquerySegment(ctx.subquery().start.getStartIndex(), ctx.subquery().stop.getStopIndex(), subquery); SubqueryTableSegment result = new SubqueryTableSegment(subquerySegment); if (null != ctx.alias()) { result.setAlias((AliasSegment) visit(ctx.alias())); } return result; } if (null != ctx.tableName()) { SimpleTableSegment result = (SimpleTableSegment) visit(ctx.tableName()); if (null != ctx.alias()) { result.setAlias((AliasSegment) visit(ctx.alias())); } return result; } return visit(ctx.tableReferences()); } private JoinTableSegment visitJoinedTable(final JoinedTableContext ctx, final TableSegment tableSegment) { JoinTableSegment result = new JoinTableSegment(); result.setLeft(tableSegment); result.setStartIndex(tableSegment.getStartIndex()); result.setStopIndex(ctx.stop.getStopIndex()); result.setJoinType(getJoinType(ctx)); TableSegment right = null != ctx.tableFactor() ? (TableSegment) visit(ctx.tableFactor()) : (TableSegment) visit(ctx.tableReference()); result.setRight(right); if (null != ctx.joinSpecification()) { result = visitJoinSpecification(ctx.joinSpecification(), result); } return result; } private String getJoinType(final JoinedTableContext ctx) { String joinType = null; if (null != ctx.innerJoinType()) { joinType = ctx.innerJoinType().JOIN() != null ? JoinType.MYSQL_INNER_JOIN.getJoinType() : JoinType.MYSQL_STRAIGHT_JOIN.getJoinType(); } else if (null != ctx.outerJoinType()) { joinType = ctx.outerJoinType().LEFT() != null ? 
JoinType.MYSQL_LEFT_JOIN.getJoinType() : JoinType.MYSQL_RIGHT_JOIN.getJoinType(); } else if (null != ctx.naturalJoinType()) { if (null != ctx.naturalJoinType().LEFT()) { joinType = JoinType.MYSQL_NATURAL_LEFT_JOIN.getJoinType(); } else if (null != ctx.naturalJoinType().RIGHT()) { joinType = JoinType.MYSQL_NATURAL_RIGHT_JOIN.getJoinType(); } else { joinType = JoinType.MYSQL_NATURAL_INNER_JOIN.getJoinType(); } } return joinType; } private JoinTableSegment visitJoinSpecification(final JoinSpecificationContext ctx, final JoinTableSegment joinTableSource) { if (null != ctx.expr()) { ExpressionSegment condition = (ExpressionSegment) visit(ctx.expr()); joinTableSource.setCondition(condition); } if (null != ctx.USING()) { List<ColumnSegment> columnSegmentList = new LinkedList<>(); for (MySQLStatementParser.ColumnNameContext cname : ctx.columnNames().columnName()) { columnSegmentList.add((ColumnSegment) visit(cname)); } joinTableSource.setUsing(columnSegmentList); } return joinTableSource; } @Override public ASTNode visitWhereClause(final WhereClauseContext ctx) { ASTNode segment = visit(ctx.expr()); return new WhereSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), (ExpressionSegment) segment); } @Override public ASTNode visitGroupByClause(final GroupByClauseContext ctx) { Collection<OrderByItemSegment> items = new LinkedList<>(); for (OrderByItemContext each : ctx.orderByItem()) { items.add((OrderByItemSegment) visit(each)); } return new GroupBySegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), items); } @Override public ASTNode visitLimitClause(final LimitClauseContext ctx) { if (null == ctx.limitOffset()) { return new LimitSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), null, (PaginationValueSegment) visit(ctx.limitRowCount())); } PaginationValueSegment rowCount; PaginationValueSegment offset; if (null != ctx.OFFSET()) { rowCount = (PaginationValueSegment) visit(ctx.limitRowCount()); offset = (PaginationValueSegment) visit(ctx.limitOffset()); } else { offset = (PaginationValueSegment) visit(ctx.limitOffset()); rowCount = (PaginationValueSegment) visit(ctx.limitRowCount()); } return new LimitSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), offset, rowCount); } @Override public ASTNode visitLimitRowCount(final LimitRowCountContext ctx) { if (null != ctx.numberLiterals()) { return new NumberLiteralLimitValueSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), ((NumberLiteralValue) visit(ctx.numberLiterals())).getValue().longValue()); } return new ParameterMarkerLimitValueSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), ((ParameterMarkerValue) visit(ctx.parameterMarker())).getValue()); } @Override public final ASTNode visitConstraintName(final ConstraintNameContext ctx) { return new ConstraintSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), (IdentifierValue) visit(ctx.identifier())); } @Override public ASTNode visitLimitOffset(final LimitOffsetContext ctx) { if (null != ctx.numberLiterals()) { return new NumberLiteralLimitValueSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), ((NumberLiteralValue) visit(ctx.numberLiterals())).getValue().longValue()); } return new ParameterMarkerLimitValueSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), ((ParameterMarkerValue) visit(ctx.parameterMarker())).getValue()); } }
```suggestion
supportedPlatforms.add(ProgramFileConstants.ANY_PLATFORM);
```
Can we use this constant without adding a String here?
private NativeDependencyResolverImpl(BuildContext buildContext, boolean skipCopyLibsFromDist) {
    CompilerContext context = buildContext.get(BuildContextField.COMPILER_CONTEXT);
    this.buildContext = buildContext;
    this.manifest = ManifestProcessor.getInstance(context).getManifest();
    this.sourceRootPath = buildContext.get(BuildContextField.SOURCE_ROOT);
    this.skipCopyLibsFromDist = skipCopyLibsFromDist;
    this.balHomePath = buildContext.get(BuildContextField.HOME_REPO).toString();
    supportedPlatforms.add("any");
}
supportedPlatforms.add("any");
private NativeDependencyResolverImpl(BuildContext buildContext, boolean skipCopyLibsFromDist) {
    CompilerContext context = buildContext.get(BuildContextField.COMPILER_CONTEXT);
    this.buildContext = buildContext;
    this.manifest = ManifestProcessor.getInstance(context).getManifest();
    this.sourceRootPath = buildContext.get(BuildContextField.SOURCE_ROOT);
    this.skipCopyLibsFromDist = skipCopyLibsFromDist;
    this.balHomePath = buildContext.get(BuildContextField.HOME_REPO).toString();
    supportedPlatforms.add(ProgramFileConstants.ANY_PLATFORM);
}
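A hypothetical sketch of the reviewer's point, not the project's actual constants class: defining the platform name behind a shared constant keeps every call site in sync if the value ever changes. Only the constant name `ProgramFileConstants.ANY_PLATFORM` comes from the suggestion; the class shape below is illustrative.

```java
// Illustrative only: a shared constants holder so the "any" platform name
// is defined once and referenced everywhere, instead of repeating the literal.
public final class ProgramFileConstants {

    public static final String ANY_PLATFORM = "any";

    private ProgramFileConstants() {
        // no instances; constants only
    }
}

// Call sites then reference the constant:
// supportedPlatforms.add(ProgramFileConstants.ANY_PLATFORM);
```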
class NativeDependencyResolverImpl implements NativeDependencyResolver { private static final CompilerContext.Key<NativeDependencyResolver> JAR_RESOLVER_KEY = new CompilerContext.Key<>(); private List<String> supportedPlatforms = Arrays.stream(ProgramFileConstants.SUPPORTED_PLATFORMS) .collect(Collectors.toList()); private final BuildContext buildContext; private final Path sourceRootPath; private final String balHomePath; private final Manifest manifest; private boolean skipCopyLibsFromDist; public static NativeDependencyResolver getInstance(BuildContext buildContext, boolean skipCopyLibsFromDist) { CompilerContext context = buildContext.get(BuildContextField.COMPILER_CONTEXT); NativeDependencyResolver nativeDependencyResolver = context.get(JAR_RESOLVER_KEY); if (nativeDependencyResolver == null) { nativeDependencyResolver = new NativeDependencyResolverImpl(buildContext, skipCopyLibsFromDist); } context.put(JAR_RESOLVER_KEY, nativeDependencyResolver); return nativeDependencyResolver; } @Override public Path moduleJar(PackageID packageID, String platform) { if (isProjectModule(packageID)) { return buildContext.getBaloFromTarget(packageID); } else if (isPathDependency(packageID)) { return buildContext.getJarPathFromTargetCache(packageID); } else if (isModuleInDistribution(packageID)) { return getJarFromDistribution(packageID); } else { return buildContext.getBaloFromHomeCache(packageID, platform); } } @Override public List<Path> nativeDependencies(PackageID packageID) { List<Path> modulePlatformLibs = new ArrayList<>(); addPlatformLibs(packageID, modulePlatformLibs); if (isProjectModule(packageID)) { return modulePlatformLibs; } else if (isPathDependency(packageID)) { addLibsFromBaloDependency(packageID, modulePlatformLibs); } else if (isModuleInDistribution(packageID)) { addLibsFromDistribution(packageID, modulePlatformLibs); } else { addLibsFromHomeBaloCache(packageID, modulePlatformLibs); } return modulePlatformLibs; } @Override public List<Path> nativeDependenciesForTests(PackageID packageID) { List<Path> testPlatformLibs = new ArrayList<>(); List<Library> libraries = manifest.getPlatform().libraries; if (libraries != null) { for (Library library : libraries) { if ((library.getModules() == null || Arrays.asList(library.getModules()).contains(packageID.name.value)) && (library.getScope() != null && library.getScope().equalsIgnoreCase("testOnly"))) { String libFilePath = library.getPath(); if (libFilePath == null) { continue; } Path nativeFile = sourceRootPath.resolve(Paths.get(libFilePath)); testPlatformLibs.add(nativeFile); } } } return testPlatformLibs; } private boolean isModuleInDistribution(PackageID packageID) { return getTomlFilePath(packageID).exists(); } private File getTomlFilePath(PackageID packageID) { String version = BLANG_PKG_DEFAULT_VERSION; if (!packageID.version.value.equals("")) { version = packageID.version.value; } return Paths.get(balHomePath, DIST_BIR_CACHE_DIR_NAME, packageID.orgName.value, packageID.name.value, version, "Ballerina.toml").toFile(); } private boolean isPathDependency(PackageID packageID) { return buildContext.getImportPathDependency(packageID).isPresent(); } private boolean isProjectModule(PackageID packageID) { return manifest.getProject().getOrgName().equals(packageID.orgName.value) && ProjectDirs.isModuleExist(sourceRootPath, packageID.name.value); } private void addLibsFromHomeBaloCache(PackageID packageID, List<Path> modulePlatformLibs) { for (String platform : supportedPlatforms) { Path baloPath = buildContext.getBaloFromHomeCache(packageID, 
platform); if (baloPath != null && Files.exists(baloPath)) { addLibsFromBalo(baloPath, modulePlatformLibs); } } } private void addLibsFromBaloDependency(PackageID packageID, List<Path> modulePlatformLibs) { addLibsFromBalo(buildContext.getImportPathDependency(packageID).get().getMetadata().getPath(), modulePlatformLibs); } private void addLibsFromDistribution(PackageID packageID, List<Path> modulePlatformLibs) { List<Path> dependencies = getDependenciesFromDist(packageID); if (dependencies != null) { modulePlatformLibs.addAll(dependencies); } } private void addPlatformLibs(PackageID packageID, List<Path> modulePlatformLibs) { List<Path> platformLibs = new ArrayList<>(); List<Library> libraries = manifest.getPlatform().libraries; Optional<Dependency> importPathDependency = buildContext.getImportPathDependency(packageID); if (libraries != null) { for (Library library : libraries) { if ((library.getModules() == null || Arrays.asList(library.getModules()).contains(packageID.name.value) || Arrays.asList(library.getModules()).contains(packageID.orgName.value + "/" + packageID.name.value)) && !(library.getScope() != null && library.getScope().equalsIgnoreCase("testOnly"))) { String libFilePath = library.getPath(); if (libFilePath == null) { continue; } Path nativeFile = sourceRootPath.resolve(Paths.get(libFilePath)); if (importPathDependency.isPresent()) { platformLibs.add(nativeFile.getFileName()); } modulePlatformLibs.add(nativeFile); } } } importPathDependency.ifPresent(dependency -> validateBaloDependencies(packageID, platformLibs, dependency.getMetadata().getPath())); } private void validateBaloDependencies(PackageID packageID, List<Path> platformLibs, Path importDependencyPath) { Manifest manifestFromBalo = RepoUtils.getManifestFromBalo(importDependencyPath); List<Library> baloDependencies = manifestFromBalo.getPlatform().libraries; List<Path> baloCompileScopeDependencies = new ArrayList<>(); if (baloDependencies == null) { return; } for (Library baloTomlLib : baloDependencies) { if (baloTomlLib.getScope() != null && baloTomlLib.getScope().equalsIgnoreCase("provided")) { baloCompileScopeDependencies.add(Paths.get(baloTomlLib.getPath()).getFileName()); } } for (Path baloTomlLib : baloCompileScopeDependencies) { if (!platformLibs.contains(baloTomlLib)) { buildContext.out().println("warning: " + packageID + " is missing a native library dependency - " + baloTomlLib); } } } private void addLibsFromBalo(Path baloFilePath, List<Path> moduleDependencySet) { String fileName = baloFilePath.getFileName().toString(); Path baloFileUnzipDirectory = Paths.get(baloFilePath.getParent().toString(), fileName.substring(0, fileName.lastIndexOf("."))); File destFile = baloFileUnzipDirectory.toFile(); if (!destFile.mkdir()) { try (DirectoryStream<Path> stream = Files.newDirectoryStream(Paths.get(destFile.toString()))) { for (Path path : stream) { moduleDependencySet.add(path); } } catch (IOException e) { throw createLauncherException("unable to copy native jar: " + e.getMessage()); } return; } try (JarFile jar = new JarFile(baloFilePath.toFile())) { Enumeration<JarEntry> enumEntries = jar.entries(); while (enumEntries.hasMoreElements()) { JarEntry file = enumEntries.nextElement(); String entryName = file.getName(); if (!entryName.endsWith(BLANG_COMPILED_JAR_EXT) || !entryName.contains(BALO_PLATFORM_LIB_DIR_NAME)) { continue; } File f = Paths.get(baloFileUnzipDirectory.toString(), entryName.split(BALO_PLATFORM_LIB_DIR_NAME)[1]).toFile(); if (!f.exists()) { try (InputStream is = jar.getInputStream(file)) { 
Files.copy(is, f.toPath()); } } moduleDependencySet.add(f.toPath()); } } catch (IOException e) { throw createLauncherException("unable to copy native jar: " + e.getMessage()); } } private List<Path> getDependenciesFromDist(PackageID packageID) { List<Path> libPaths = new ArrayList<>(); File tomlFile = getTomlFilePath(packageID); if (skipCopyLibsFromDist) { return null; } Toml tomlConfig = new Toml().read(tomlFile); Toml platform = tomlConfig.getTable("platform"); if (platform == null) { return null; } List<Object> libraries = platform.getList("libraries"); if (libraries == null) { return null; } for (Object lib : libraries) { Path fileName = Paths.get(((HashMap) lib).get("path").toString()).getFileName(); libPaths.add(Paths.get(balHomePath, "bre", "lib", fileName.toString())); } return libPaths; } private Path getJarFromDistribution(PackageID packageID) { List<Path> dependencies = getDependenciesFromDist(packageID); Path jarPath = null; if (dependencies == null) { return null; } for (Path dependency: dependencies) { if (dependency.getFileName().toString().equals(String.join("-", packageID.orgName.value, packageID.name.value, RepoUtils.getBallerinaVersion()).concat(".jar"))) { jarPath = dependency; break; } } return jarPath; } }
class NativeDependencyResolverImpl implements NativeDependencyResolver { private static final CompilerContext.Key<NativeDependencyResolver> JAR_RESOLVER_KEY = new CompilerContext.Key<>(); private List<String> supportedPlatforms = Arrays.stream(ProgramFileConstants.SUPPORTED_PLATFORMS) .collect(Collectors.toList()); private final BuildContext buildContext; private final Path sourceRootPath; private final String balHomePath; private final Manifest manifest; private boolean skipCopyLibsFromDist; public static NativeDependencyResolver getInstance(BuildContext buildContext, boolean skipCopyLibsFromDist) { CompilerContext context = buildContext.get(BuildContextField.COMPILER_CONTEXT); NativeDependencyResolver nativeDependencyResolver = context.get(JAR_RESOLVER_KEY); if (nativeDependencyResolver == null) { nativeDependencyResolver = new NativeDependencyResolverImpl(buildContext, skipCopyLibsFromDist); } context.put(JAR_RESOLVER_KEY, nativeDependencyResolver); return nativeDependencyResolver; } @Override public Path moduleJar(PackageID packageID, String platform) { if (isProjectModule(packageID)) { return buildContext.getBaloFromTarget(packageID); } else if (isPathDependency(packageID)) { return buildContext.getJarPathFromTargetCache(packageID); } else if (isModuleInDistribution(packageID)) { return getJarFromDistribution(packageID); } else { return buildContext.getBaloFromHomeCache(packageID, platform); } } @Override public List<Path> nativeDependencies(PackageID packageID) { List<Path> modulePlatformLibs = new ArrayList<>(); addPlatformLibs(packageID, modulePlatformLibs); if (isProjectModule(packageID)) { return modulePlatformLibs; } else if (isPathDependency(packageID)) { addLibsFromBaloDependency(packageID, modulePlatformLibs); } else if (isModuleInDistribution(packageID)) { addLibsFromDistribution(packageID, modulePlatformLibs); } else { addLibsFromHomeBaloCache(packageID, modulePlatformLibs); } return modulePlatformLibs; } @Override public List<Path> nativeDependenciesForTests(PackageID packageID) { List<Path> testPlatformLibs = new ArrayList<>(); List<Library> libraries = manifest.getPlatform().libraries; if (libraries != null) { for (Library library : libraries) { if ((library.getModules() == null || Arrays.asList(library.getModules()).contains(packageID.name.value)) && (library.getScope() != null && library.getScope().equalsIgnoreCase("testOnly"))) { String libFilePath = library.getPath(); if (libFilePath == null) { continue; } Path nativeFile = sourceRootPath.resolve(Paths.get(libFilePath)); testPlatformLibs.add(nativeFile); } } } return testPlatformLibs; } private boolean isModuleInDistribution(PackageID packageID) { return getTomlFilePath(packageID).exists(); } private File getTomlFilePath(PackageID packageID) { String version = BLANG_PKG_DEFAULT_VERSION; if (!packageID.version.value.equals("")) { version = packageID.version.value; } return Paths.get(balHomePath, DIST_BIR_CACHE_DIR_NAME, packageID.orgName.value, packageID.name.value, version, "Ballerina.toml").toFile(); } private boolean isPathDependency(PackageID packageID) { return buildContext.getImportPathDependency(packageID).isPresent(); } private boolean isProjectModule(PackageID packageID) { return manifest.getProject().getOrgName().equals(packageID.orgName.value) && ProjectDirs.isModuleExist(sourceRootPath, packageID.name.value); } private void addLibsFromHomeBaloCache(PackageID packageID, List<Path> modulePlatformLibs) { for (String platform : supportedPlatforms) { Path baloPath = buildContext.getBaloFromHomeCache(packageID, 
platform); if (baloPath != null && baloPath.toFile().exists()) { addLibsFromBalo(baloPath, modulePlatformLibs); } } } private void addLibsFromBaloDependency(PackageID packageID, List<Path> modulePlatformLibs) { addLibsFromBalo(buildContext.getImportPathDependency(packageID).get().getMetadata().getPath(), modulePlatformLibs); } private void addLibsFromDistribution(PackageID packageID, List<Path> modulePlatformLibs) { List<Path> dependencies = getDependenciesFromDist(packageID); if (dependencies != null) { modulePlatformLibs.addAll(dependencies); } } private void addPlatformLibs(PackageID packageID, List<Path> modulePlatformLibs) { List<Path> platformLibs = new ArrayList<>(); List<Library> libraries = manifest.getPlatform().libraries; Optional<Dependency> importPathDependency = buildContext.getImportPathDependency(packageID); if (libraries != null) { for (Library library : libraries) { if ((library.getModules() == null || Arrays.asList(library.getModules()).contains(packageID.name.value) || Arrays.asList(library.getModules()).contains(packageID.orgName.value + "/" + packageID.name.value)) && !(library.getScope() != null && library.getScope().equalsIgnoreCase("testOnly"))) { String libFilePath = library.getPath(); if (libFilePath == null) { continue; } Path nativeFile = sourceRootPath.resolve(Paths.get(libFilePath)); if (importPathDependency.isPresent()) { platformLibs.add(nativeFile.getFileName()); } modulePlatformLibs.add(nativeFile); } } } importPathDependency.ifPresent(dependency -> validateBaloDependencies(packageID, platformLibs, dependency.getMetadata().getPath())); } private void validateBaloDependencies(PackageID packageID, List<Path> platformLibs, Path importDependencyPath) { Manifest manifestFromBalo = RepoUtils.getManifestFromBalo(importDependencyPath); List<Library> baloDependencies = manifestFromBalo.getPlatform().libraries; List<Path> baloCompileScopeDependencies = new ArrayList<>(); if (baloDependencies == null) { return; } for (Library baloTomlLib : baloDependencies) { if (baloTomlLib.getScope() != null && baloTomlLib.getScope().equalsIgnoreCase("provided")) { baloCompileScopeDependencies.add(Paths.get(baloTomlLib.getPath()).getFileName()); } } for (Path baloTomlLib : baloCompileScopeDependencies) { if (!platformLibs.contains(baloTomlLib)) { buildContext.out().println("warning: " + packageID + " is missing a native library dependency - " + baloTomlLib); } } } private void addLibsFromBalo(Path baloFilePath, List<Path> moduleDependencySet) { String fileName = baloFilePath.getFileName().toString(); Path baloFileUnzipDirectory = Paths.get(baloFilePath.getParent().toString(), fileName.substring(0, fileName.lastIndexOf('.'))); File destFile = baloFileUnzipDirectory.toFile(); if (!destFile.mkdir()) { try (DirectoryStream<Path> stream = Files.newDirectoryStream(Paths.get(destFile.toString()))) { for (Path path : stream) { moduleDependencySet.add(path); } } catch (IOException e) { throw createLauncherException("unable to copy native jar: " + e.getMessage()); } return; } try (JarFile jar = new JarFile(baloFilePath.toFile())) { Enumeration<JarEntry> enumEntries = jar.entries(); while (enumEntries.hasMoreElements()) { JarEntry file = enumEntries.nextElement(); String entryName = file.getName(); if (!entryName.endsWith(BLANG_COMPILED_JAR_EXT) || !entryName.contains(BALO_PLATFORM_LIB_DIR_NAME)) { continue; } File f = Paths.get(baloFileUnzipDirectory.toString(), entryName.split(BALO_PLATFORM_LIB_DIR_NAME)[1]).toFile(); if (!f.exists()) { try (InputStream is = jar.getInputStream(file)) { 
Files.copy(is, f.toPath()); } } moduleDependencySet.add(f.toPath()); } } catch (IOException e) { throw createLauncherException("unable to copy native jar: " + e.getMessage()); } } private List<Path> getDependenciesFromDist(PackageID packageID) { List<Path> libPaths = new ArrayList<>(); File tomlFile = getTomlFilePath(packageID); if (skipCopyLibsFromDist) { return null; } Toml tomlConfig = new Toml().read(tomlFile); Toml platform = tomlConfig.getTable("platform"); if (platform == null) { return null; } List<Object> libraries = platform.getList("libraries"); if (libraries == null) { return null; } for (Object lib : libraries) { Path fileName = Paths.get(((HashMap) lib).get("path").toString()).getFileName(); libPaths.add(Paths.get(balHomePath, "bre", "lib", fileName.toString())); } return libPaths; } private Path getJarFromDistribution(PackageID packageID) { List<Path> dependencies = getDependenciesFromDist(packageID); Path jarPath = null; if (dependencies == null) { return null; } for (Path dependency: dependencies) { if (dependency.getFileName().toString().equals(String.join("-", packageID.orgName.value, packageID.name.value, RepoUtils.getBallerinaVersion()).concat(".jar"))) { jarPath = dependency; break; } } return jarPath; } }
Don't we need to check for `ERROR_VARIABLE` here? Is it final by default? I think it is better to also add a default clause that throws an error, to make sure that no unexpected value gets silently ignored here. WDYT?
private void recursivelySetFinalFlag(BLangVariable variable) {
    switch (variable.getKind()) {
        case VARIABLE:
            variable.symbol.flags |= Flags.FINAL;
            break;
        case TUPLE_VARIABLE:
            ((BLangTupleVariable) variable).memberVariables.forEach(this::recursivelySetFinalFlag);
            break;
        case RECORD_VARIABLE:
            ((BLangRecordVariable) variable).variableList
                    .forEach(value -> recursivelySetFinalFlag(value.valueBindingPattern));
            break;
    }
}
switch (variable.getKind()) {
private void recursivelySetFinalFlag(BLangVariable variable) {
    if (variable == null) {
        return;
    }
    switch (variable.getKind()) {
        case VARIABLE:
            if (variable.symbol == null) {
                return;
            }
            variable.symbol.flags |= Flags.FINAL;
            break;
        case TUPLE_VARIABLE:
            BLangTupleVariable tupleVariable = (BLangTupleVariable) variable;
            tupleVariable.memberVariables.forEach(this::recursivelySetFinalFlag);
            recursivelySetFinalFlag(tupleVariable.restVariable);
            break;
        case RECORD_VARIABLE:
            BLangRecordVariable recordVariable = (BLangRecordVariable) variable;
            recordVariable.variableList
                    .forEach(value -> recursivelySetFinalFlag(value.valueBindingPattern));
            recursivelySetFinalFlag((BLangVariable) recordVariable.restParam);
            break;
        case ERROR_VARIABLE:
            BLangErrorVariable errorVariable = (BLangErrorVariable) variable;
            recursivelySetFinalFlag(errorVariable.reason);
            recursivelySetFinalFlag(errorVariable.restDetail);
            errorVariable.detail.forEach(bLangErrorDetailEntry ->
                    recursivelySetFinalFlag(bLangErrorDetailEntry.valueBindingPattern));
            break;
    }
}
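The revised version above handles `ERROR_VARIABLE` (and adds null guards), but it does not include the default clause the reviewer asked for. A minimal sketch of that clause, assuming `IllegalStateException` as the failure mechanism; the project may prefer its own diagnostic or assertion utility, and the elided cases are the same as above:

```java
private void recursivelySetFinalFlag(BLangVariable variable) {
    if (variable == null) {
        return;
    }
    switch (variable.getKind()) {
        case VARIABLE:
        case TUPLE_VARIABLE:
        case RECORD_VARIABLE:
        case ERROR_VARIABLE:
            // ... same per-kind handling as in the method above ...
            break;
        default:
            // Fail fast so a newly introduced binding-pattern kind cannot be
            // silently skipped when final flags are propagated.
            throw new IllegalStateException("unexpected variable kind: " + variable.getKind());
    }
}
```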
class SemanticAnalyzer extends BLangNodeVisitor { private static final CompilerContext.Key<SemanticAnalyzer> SYMBOL_ANALYZER_KEY = new CompilerContext.Key<>(); private static final String ANONYMOUS_RECORD_NAME = "anonymous-record"; private static final String NULL_LITERAL = "null"; private SymbolTable symTable; private SymbolEnter symbolEnter; private Names names; private SymbolResolver symResolver; private TypeChecker typeChecker; private Types types; private StreamsQuerySemanticAnalyzer streamsQuerySemanticAnalyzer; private BLangDiagnosticLog dlog; private TypeNarrower typeNarrower; private ConstantValueResolver constantValueResolver; private SymbolEnv env; private BType expType; private DiagnosticCode diagCode; private BType resType; private Stack<SymbolEnv> prevEnvs = new Stack<>(); public static SemanticAnalyzer getInstance(CompilerContext context) { SemanticAnalyzer semAnalyzer = context.get(SYMBOL_ANALYZER_KEY); if (semAnalyzer == null) { semAnalyzer = new SemanticAnalyzer(context); } return semAnalyzer; } public SemanticAnalyzer(CompilerContext context) { context.put(SYMBOL_ANALYZER_KEY, this); this.symTable = SymbolTable.getInstance(context); this.symbolEnter = SymbolEnter.getInstance(context); this.names = Names.getInstance(context); this.symResolver = SymbolResolver.getInstance(context); this.typeChecker = TypeChecker.getInstance(context); this.types = Types.getInstance(context); this.streamsQuerySemanticAnalyzer = StreamsQuerySemanticAnalyzer.getInstance(context); this.dlog = BLangDiagnosticLog.getInstance(context); this.typeNarrower = TypeNarrower.getInstance(context); this.constantValueResolver = ConstantValueResolver.getInstance(context); } public BLangPackage analyze(BLangPackage pkgNode) { pkgNode.accept(this); return pkgNode; } public void visit(BLangPackage pkgNode) { if (pkgNode.completedPhases.contains(CompilerPhase.TYPE_CHECK)) { return; } SymbolEnv pkgEnv = this.symTable.pkgEnvMap.get(pkgNode.symbol); pkgNode.topLevelNodes.stream().filter(pkgLevelNode -> pkgLevelNode.getKind() == NodeKind.CONSTANT) .forEach(constant -> analyzeDef((BLangNode) constant, pkgEnv)); this.constantValueResolver.resolve(pkgNode.constants); pkgNode.topLevelNodes.stream().filter(pkgLevelNode -> pkgLevelNode.getKind() != NodeKind.CONSTANT) .filter(pkgLevelNode -> !(pkgLevelNode.getKind() == NodeKind.FUNCTION && ((BLangFunction) pkgLevelNode).flagSet.contains(Flag.LAMBDA))) .forEach(topLevelNode -> analyzeDef((BLangNode) topLevelNode, pkgEnv)); while (pkgNode.lambdaFunctions.peek() != null) { BLangLambdaFunction lambdaFunction = pkgNode.lambdaFunctions.poll(); BLangFunction function = lambdaFunction.function; lambdaFunction.type = function.symbol.type; analyzeDef(lambdaFunction.function, lambdaFunction.cachedEnv); } pkgNode.getTestablePkgs().forEach(testablePackage -> visit((BLangPackage) testablePackage)); pkgNode.completedPhases.add(CompilerPhase.TYPE_CHECK); } public void visit(BLangXMLNS xmlnsNode) { xmlnsNode.type = symTable.stringType; if (xmlnsNode.symbol == null) { symbolEnter.defineNode(xmlnsNode, env); } typeChecker.checkExpr(xmlnsNode.namespaceURI, env, symTable.stringType); } public void visit(BLangXMLNSStatement xmlnsStmtNode) { analyzeNode(xmlnsStmtNode.xmlnsDecl, env); } public void visit(BLangFunction funcNode) { SymbolEnv funcEnv = SymbolEnv.createFunctionEnv(funcNode, funcNode.symbol.scope, env); funcNode.symbol.params.forEach(param -> param.flags |= Flags.FUNCTION_FINAL); funcNode.annAttachments.forEach(annotationAttachment -> { if (Symbols.isFlagOn(funcNode.symbol.flags, 
Flags.RESOURCE)) { annotationAttachment.attachPoints.add(AttachPoint.Point.RESOURCE); } else if (funcNode.attachedFunction) { annotationAttachment.attachPoints.add(AttachPoint.Point.OBJECT_METHOD); } annotationAttachment.attachPoints.add(AttachPoint.Point.FUNCTION); this.analyzeDef(annotationAttachment, funcEnv); }); validateAnnotationAttachmentCount(funcNode.annAttachments); if (funcNode.returnTypeNode != null) { funcNode.returnTypeAnnAttachments.forEach(annotationAttachment -> { annotationAttachment.attachPoints.add(AttachPoint.Point.RETURN); this.analyzeDef(annotationAttachment, funcEnv); }); validateAnnotationAttachmentCount(funcNode.returnTypeAnnAttachments); } if (Symbols.isNative(funcNode.symbol)) { funcNode.externalAnnAttachments.forEach(annotationAttachment -> { annotationAttachment.attachPoints.add(AttachPoint.Point.EXTERNAL); this.analyzeDef(annotationAttachment, funcEnv); }); validateAnnotationAttachmentCount(funcNode.externalAnnAttachments); } for (BLangSimpleVariable param : funcNode.requiredParams) { symbolEnter.defineExistingVarSymbolInEnv(param.symbol, funcNode.clonedEnv); this.analyzeDef(param, funcNode.clonedEnv); } if (funcNode.restParam != null) { symbolEnter.defineExistingVarSymbolInEnv(funcNode.restParam.symbol, funcNode.clonedEnv); this.analyzeDef(funcNode.restParam, funcNode.clonedEnv); } validateObjectAttachedFunction(funcNode); if (Symbols.isNative(funcNode.symbol) || funcNode.interfaceFunction) { if (funcNode.body != null) { dlog.error(funcNode.pos, DiagnosticCode.EXTERN_FUNCTION_CANNOT_HAVE_BODY, funcNode.name); } return; } if (funcNode.body != null) { analyzeStmt(funcNode.body, funcEnv); } this.processWorkers(funcNode, funcEnv); } private void processWorkers(BLangInvokableNode invNode, SymbolEnv invEnv) { if (invNode.workers.size() > 0) { invEnv.scope.entries.putAll(invNode.body.scope.entries); invNode.workers.forEach(e -> this.symbolEnter.defineNode(e, invEnv)); invNode.workers.forEach(e -> analyzeNode(e, invEnv)); } } @Override public void visit(BLangTypeDefinition typeDefinition) { if (typeDefinition.typeNode.getKind() == NodeKind.OBJECT_TYPE || typeDefinition.typeNode.getKind() == NodeKind.RECORD_TYPE || typeDefinition.typeNode.getKind() == NodeKind.ERROR_TYPE || typeDefinition.typeNode.getKind() == NodeKind.FINITE_TYPE_NODE) { analyzeDef(typeDefinition.typeNode, env); } typeDefinition.annAttachments.forEach(annotationAttachment -> { if (typeDefinition.typeNode.getKind() == NodeKind.OBJECT_TYPE) { annotationAttachment.attachPoints.add(AttachPoint.Point.OBJECT); } annotationAttachment.attachPoints.add(AttachPoint.Point.TYPE); annotationAttachment.accept(this); }); validateAnnotationAttachmentCount(typeDefinition.annAttachments); } public void visit(BLangTypeConversionExpr conversionExpr) { conversionExpr.annAttachments.forEach(annotationAttachment -> { annotationAttachment.attachPoints.add(AttachPoint.Point.TYPE); if (conversionExpr.typeNode.getKind() == NodeKind.OBJECT_TYPE) { annotationAttachment.attachPoints.add(AttachPoint.Point.OBJECT); } annotationAttachment.accept(this); }); validateAnnotationAttachmentCount(conversionExpr.annAttachments); } @Override public void visit(BLangFiniteTypeNode finiteTypeNode) { finiteTypeNode.valueSpace.forEach(val -> { if (val.type.tag == TypeTags.NIL && NULL_LITERAL.equals(((BLangLiteral) val).originalValue)) { dlog.error(val.pos, DiagnosticCode.INVALID_USE_OF_NULL_LITERAL); } }); } @Override public void visit(BLangObjectTypeNode objectTypeNode) { SymbolEnv objectEnv = SymbolEnv.createTypeEnv(objectTypeNode, 
objectTypeNode.symbol.scope, env); boolean isAbstract = objectTypeNode.flagSet.contains(Flag.ABSTRACT); objectTypeNode.fields.forEach(field -> { analyzeDef(field, objectEnv); if (isAbstract && field.flagSet.contains(Flag.PRIVATE)) { this.dlog.error(field.pos, DiagnosticCode.PRIVATE_FIELD_ABSTRACT_OBJECT, field.symbol.name); } }); objectTypeNode.functions.forEach(func -> { analyzeDef(func, env); if (isAbstract && func.flagSet.contains(Flag.PRIVATE)) { this.dlog.error(func.pos, DiagnosticCode.PRIVATE_FUNC_ABSTRACT_OBJECT, func.name, objectTypeNode.symbol.name); } if (isAbstract && func.flagSet.contains(Flag.NATIVE)) { this.dlog.error(func.pos, DiagnosticCode.EXTERN_FUNC_ABSTRACT_OBJECT, func.name, objectTypeNode.symbol.name); } if (func.flagSet.contains(Flag.RESOURCE) && func.flagSet.contains(Flag.NATIVE)) { this.dlog.error(func.pos, DiagnosticCode.RESOURCE_FUNCTION_CANNOT_BE_EXTERN, func.name); } }); ((BObjectTypeSymbol) objectTypeNode.symbol).referencedFunctions .forEach(func -> validateReferencedFunction(objectTypeNode.pos, func, env)); if (objectTypeNode.initFunction == null) { return; } if (objectTypeNode.initFunction.flagSet.contains(Flag.PRIVATE)) { this.dlog.error(objectTypeNode.initFunction.pos, DiagnosticCode.PRIVATE_OBJECT_CONSTRUCTOR, objectTypeNode.symbol.name); return; } if (objectTypeNode.flagSet.contains(Flag.ABSTRACT)) { this.dlog.error(objectTypeNode.initFunction.pos, DiagnosticCode.ABSTRACT_OBJECT_CONSTRUCTOR, objectTypeNode.symbol.name); return; } if (objectTypeNode.initFunction.flagSet.contains(Flag.NATIVE)) { this.dlog.error(objectTypeNode.initFunction.pos, DiagnosticCode.OBJECT_INIT_FUNCTION_CANNOT_BE_EXTERN, objectTypeNode.symbol.name); return; } analyzeDef(objectTypeNode.initFunction, env); } @Override public void visit(BLangRecordTypeNode recordTypeNode) { SymbolEnv recordEnv = SymbolEnv.createTypeEnv(recordTypeNode, recordTypeNode.symbol.scope, env); recordTypeNode.fields.forEach(field -> analyzeDef(field, recordEnv)); analyzeDef(recordTypeNode.initFunction, recordEnv); validateDefaultable(recordTypeNode); } @Override public void visit(BLangErrorType errorType) { BType reasonType = getReasonType(errorType); if (!types.isAssignable(reasonType, symTable.stringType)) { dlog.error(errorType.reasonType.pos, DiagnosticCode.INVALID_ERROR_REASON_TYPE, reasonType); } if (errorType.detailType == null) { return; } BType detailType = errorType.detailType.type; if (!types.isValidErrorDetailType(detailType)) { dlog.error(errorType.detailType.pos, DiagnosticCode.INVALID_ERROR_DETAIL_TYPE, detailType, symTable.detailType); } } private BType getReasonType(BLangErrorType errorType) { if (errorType.reasonType == null) { return symTable.stringType; } return errorType.reasonType.type; } public void visit(BLangAnnotation annotationNode) { annotationNode.annAttachments.forEach(annotationAttachment -> { annotationAttachment.attachPoints.add(AttachPoint.Point.ANNOTATION); annotationAttachment.accept(this); }); validateAnnotationAttachmentCount(annotationNode.annAttachments); } public void visit(BLangAnnotationAttachment annAttachmentNode) { BSymbol symbol = this.symResolver.resolveAnnotation(annAttachmentNode.pos, env, names.fromString(annAttachmentNode.pkgAlias.getValue()), names.fromString(annAttachmentNode.getAnnotationName().getValue())); if (symbol == this.symTable.notFoundSymbol) { this.dlog.error(annAttachmentNode.pos, DiagnosticCode.UNDEFINED_ANNOTATION, annAttachmentNode.getAnnotationName().getValue()); return; } BAnnotationSymbol annotationSymbol = (BAnnotationSymbol) symbol; 
annAttachmentNode.annotationSymbol = annotationSymbol; if (annotationSymbol.maskedPoints > 0 && !Symbols.isAttachPointPresent(annotationSymbol.maskedPoints, AttachPoints.asMask(annAttachmentNode.attachPoints))) { String msg = annAttachmentNode.attachPoints.stream() .map(point -> point.name().toLowerCase()) .collect(Collectors.joining(", ")); this.dlog.error(annAttachmentNode.pos, DiagnosticCode.ANNOTATION_NOT_ALLOWED, annotationSymbol, msg); } validateAnnotationAttachmentExpr(annAttachmentNode, annotationSymbol); } public void visit(BLangSimpleVariable varNode) { if (varNode.isDeclaredWithVar) { validateWorkerAnnAttachments(varNode.expr); handleDeclaredWithVar(varNode); return; } int ownerSymTag = env.scope.owner.tag; if ((ownerSymTag & SymTag.INVOKABLE) == SymTag.INVOKABLE) { if (varNode.symbol == null) { symbolEnter.defineNode(varNode, env); varNode.annAttachments.forEach(annotationAttachment -> { annotationAttachment.attachPoints.add(AttachPoint.Point.VAR); annotationAttachment.accept(this); }); } else { varNode.annAttachments.forEach(annotationAttachment -> { annotationAttachment.attachPoints.add(AttachPoint.Point.PARAMETER); annotationAttachment.accept(this); }); } } else { varNode.annAttachments.forEach(annotationAttachment -> { if (Symbols.isFlagOn(varNode.symbol.flags, Flags.LISTENER)) { annotationAttachment.attachPoints.add(AttachPoint.Point.LISTENER); } else if (Symbols.isFlagOn(varNode.symbol.flags, Flags.SERVICE)) { annotationAttachment.attachPoints.add(AttachPoint.Point.SERVICE); } else { annotationAttachment.attachPoints.add(AttachPoint.Point.VAR); } annotationAttachment.accept(this); }); } validateAnnotationAttachmentCount(varNode.annAttachments); validateWorkerAnnAttachments(varNode.expr); if (varNode.name.value.equals(Names.IGNORE.value)) { varNode.symbol = new BVarSymbol(0, Names.IGNORE, env.enclPkg.packageID, symTable.anyType, env.scope.owner); } BType lhsType = varNode.symbol.type; varNode.type = lhsType; BLangExpression rhsExpr = varNode.expr; if (rhsExpr == null) { if (lhsType.tag == TypeTags.ARRAY && typeChecker.isArrayOpenSealedType((BArrayType) lhsType)) { dlog.error(varNode.pos, DiagnosticCode.SEALED_ARRAY_TYPE_NOT_INITIALIZED); } return; } SymbolEnv varInitEnv = SymbolEnv.createVarInitEnv(varNode, env, varNode.symbol); typeChecker.checkExpr(rhsExpr, varInitEnv, lhsType); if (Symbols.isFlagOn(varNode.symbol.flags, Flags.LISTENER) && !types.checkListenerCompatibility(varNode.symbol.type)) { dlog.error(varNode.pos, DiagnosticCode.INVALID_LISTENER_VARIABLE, varNode.name); } } /** * Validate annotation attachment of the `start` action or workers. * * @param expr expression to be validated. 
*/ private void validateWorkerAnnAttachments(BLangExpression expr) { if (expr != null && expr.getKind() == NodeKind.INVOCATION && ((BLangInvocation) expr).async) { ((BLangInvocation) expr).annAttachments.forEach(annotationAttachment -> { annotationAttachment.attachPoints.add(AttachPoint.Point.WORKER); annotationAttachment.accept(this); }); validateAnnotationAttachmentCount(((BLangInvocation) expr).annAttachments); } } public void visit(BLangRecordVariable varNode) { if (varNode.isDeclaredWithVar) { handleDeclaredWithVar(varNode); return; } if (varNode.type == null) { varNode.type = symResolver.resolveTypeNode(varNode.typeNode, env); } if (!validateRecordVariable(varNode)) { varNode.type = symTable.semanticError; return; } symbolEnter.defineNode(varNode, env); if (varNode.expr == null) { return; } typeChecker.checkExpr(varNode.expr, env, varNode.type); } public void visit(BLangTupleVariable varNode) { if (varNode.isDeclaredWithVar) { expType = resolveTupleType(varNode); handleDeclaredWithVar(varNode); return; } if (varNode.type == null) { varNode.type = symResolver.resolveTypeNode(varNode.typeNode, env); } if (!(checkTypeAndVarCountConsistency(varNode))) { varNode.type = symTable.semanticError; return; } symbolEnter.defineNode(varNode, env); if (varNode.expr == null) { return; } typeChecker.checkExpr(varNode.expr, env, varNode.type); } private BType resolveTupleType(BLangTupleVariable varNode) { List<BType> memberTypes = new ArrayList<>(varNode.memberVariables.size()); for (BLangVariable memberVariable : varNode.memberVariables) { if (memberVariable.getKind() == NodeKind.TUPLE_VARIABLE) { memberTypes.add(resolveTupleType((BLangTupleVariable) memberVariable)); } else { memberTypes.add(symTable.noType); } } return new BTupleType(memberTypes); } public void visit(BLangErrorVariable varNode) { if (varNode.isDeclaredWithVar) { handleDeclaredWithVar(varNode); return; } if (varNode.type == null) { varNode.type = symResolver.resolveTypeNode(varNode.typeNode, env); } if (!varNode.reasonVarPrefixAvailable && varNode.type == null) { BErrorType errorType = new BErrorType(varNode.type.tsymbol, null, null); if (varNode.type.tag == TypeTags.UNION) { Set<BType> members = types.expandAndGetMemberTypesRecursive(varNode.type); List<BErrorType> errorMembers = members.stream() .filter(m -> m.tag == TypeTags.ERROR) .map(m -> (BErrorType) m) .collect(Collectors.toList()); if (errorMembers.isEmpty()) { dlog.error(varNode.pos, DiagnosticCode.INVALID_ERROR_MATCH_PATTERN); return; } else if (errorMembers.size() == 1) { errorType.detailType = errorMembers.get(0).detailType; errorType.reasonType = errorMembers.get(0).reasonType; } else { errorType.detailType = symTable.detailType; errorType.reasonType = symTable.stringType; } varNode.type = errorType; } else if (varNode.type.tag == TypeTags.ERROR) { errorType.detailType = ((BErrorType) varNode.type).detailType; } if (varNode.reasonMatchConst != null) { BTypeSymbol reasonConstTypeSymbol = new BTypeSymbol(SymTag.FINITE_TYPE, Flags.PUBLIC, names.fromString(""), this.env.enclPkg.packageID, null, this.env.scope.owner); varNode.reasonMatchConst.type = symTable.stringType; typeChecker.checkExpr(varNode.reasonMatchConst, env); LinkedHashSet<BLangExpression> members = new LinkedHashSet<>(); members.add(varNode.reasonMatchConst); errorType.reasonType = new BFiniteType(reasonConstTypeSymbol, members); } else { errorType.reasonType = symTable.stringType; } } if (!validateErrorVariable(varNode)) { varNode.type = symTable.semanticError; return; } symbolEnter.defineNode(varNode, 
env); if (varNode.expr == null) { return; } typeChecker.checkExpr(varNode.expr, env, varNode.type); } private void handleDeclaredWithVar(BLangVariable variable) { BLangExpression varRefExpr = variable.expr; BType rhsType = typeChecker.checkExpr(varRefExpr, this.env, expType); switch (variable.getKind()) { case VARIABLE: if (!validateVariableDefinition(varRefExpr)) { rhsType = symTable.semanticError; } BLangSimpleVariable simpleVariable = (BLangSimpleVariable) variable; Name varName = names.fromIdNode(simpleVariable.name); if (varName == Names.IGNORE) { dlog.error(simpleVariable.pos, DiagnosticCode.NO_NEW_VARIABLES_VAR_ASSIGNMENT); return; } simpleVariable.type = rhsType; int ownerSymTag = env.scope.owner.tag; if ((ownerSymTag & SymTag.INVOKABLE) == SymTag.INVOKABLE) { if (simpleVariable.symbol == null) { symbolEnter.defineNode(simpleVariable, env); } } simpleVariable.symbol.type = rhsType; break; case TUPLE_VARIABLE: if (variable.isDeclaredWithVar && variable.expr.getKind() == NodeKind.LIST_CONSTRUCTOR_EXPR) { dlog.error(varRefExpr.pos, DiagnosticCode.INVALID_LITERAL_FOR_TYPE, "tuple binding pattern"); variable.type = symTable.semanticError; return; } if (TypeTags.TUPLE != rhsType.tag) { dlog.error(varRefExpr.pos, DiagnosticCode.INVALID_TYPE_DEFINITION_FOR_TUPLE_VAR, rhsType); variable.type = symTable.semanticError; return; } BLangTupleVariable tupleVariable = (BLangTupleVariable) variable; tupleVariable.type = rhsType; if (!(checkTypeAndVarCountConsistency(tupleVariable))) { tupleVariable.type = symTable.semanticError; return; } symbolEnter.defineNode(tupleVariable, env); break; case RECORD_VARIABLE: if (TypeTags.RECORD != rhsType.tag && TypeTags.MAP != rhsType.tag && TypeTags.JSON != rhsType.tag) { dlog.error(varRefExpr.pos, DiagnosticCode.INVALID_TYPE_DEFINITION_FOR_RECORD_VAR, rhsType); variable.type = symTable.semanticError; } BLangRecordVariable recordVariable = (BLangRecordVariable) variable; recordVariable.type = rhsType; if (!validateRecordVariable(recordVariable)) { recordVariable.type = symTable.semanticError; } break; case ERROR_VARIABLE: if (TypeTags.ERROR != rhsType.tag) { dlog.error(variable.expr.pos, DiagnosticCode.INVALID_TYPE_DEFINITION_FOR_ERROR_VAR, rhsType); variable.type = symTable.semanticError; return; } BLangErrorVariable errorVariable = (BLangErrorVariable) variable; errorVariable.type = rhsType; if (!validateErrorVariable(errorVariable)) { errorVariable.type = symTable.semanticError; return; } symbolEnter.defineNode(errorVariable, env); break; } } private void handleDeclaredWithVar(BLangVariable variable, BType rhsType, SymbolEnv blockEnv) { switch (variable.getKind()) { case VARIABLE: BLangSimpleVariable simpleVariable = (BLangSimpleVariable) variable; Name varName = names.fromIdNode(simpleVariable.name); if (varName == Names.IGNORE) { dlog.error(simpleVariable.pos, DiagnosticCode.UNDERSCORE_NOT_ALLOWED); return; } simpleVariable.type = rhsType; int ownerSymTag = blockEnv.scope.owner.tag; if ((ownerSymTag & SymTag.INVOKABLE) == SymTag.INVOKABLE) { if (simpleVariable.symbol == null) { symbolEnter.defineNode(simpleVariable, blockEnv); } } recursivelySetFinalFlag(simpleVariable); break; case TUPLE_VARIABLE: BLangTupleVariable tupleVariable = (BLangTupleVariable) variable; if (TypeTags.TUPLE != rhsType.tag && TypeTags.UNION != rhsType.tag) { dlog.error(variable.pos, DiagnosticCode.INVALID_TYPE_DEFINITION_FOR_TUPLE_VAR, rhsType); recursivelyDefineVariables(tupleVariable, blockEnv); return; } tupleVariable.type = rhsType; if (rhsType.tag == TypeTags.TUPLE && 
!(checkTypeAndVarCountConsistency(tupleVariable, (BTupleType) tupleVariable.type, blockEnv))) { return; } if (rhsType.tag == TypeTags.UNION && !(checkTypeAndVarCountConsistency(tupleVariable, null, blockEnv))) { return; } symbolEnter.defineNode(tupleVariable, blockEnv); recursivelySetFinalFlag(tupleVariable); break; case RECORD_VARIABLE: BLangRecordVariable recordVariable = (BLangRecordVariable) variable; recordVariable.type = rhsType; validateRecordVariable(recordVariable, blockEnv); recursivelySetFinalFlag(recordVariable); break; case ERROR_VARIABLE: BLangErrorVariable errorVariable = (BLangErrorVariable) variable; if (TypeTags.ERROR != rhsType.tag) { dlog.error(variable.pos, DiagnosticCode.INVALID_TYPE_DEFINITION_FOR_ERROR_VAR, rhsType); recursivelyDefineVariables(errorVariable, blockEnv); return; } errorVariable.type = rhsType; validateErrorVariable(errorVariable); recursivelySetFinalFlag(errorVariable); break; } } private void recursivelyDefineVariables(BLangVariable variable, SymbolEnv blockEnv) { switch (variable.getKind()) { case VARIABLE: Name name = names.fromIdNode(((BLangSimpleVariable) variable).name); if (name == Names.IGNORE) { return; } variable.type = symTable.semanticError; symbolEnter.defineVarSymbol(variable.pos, variable.flagSet, variable.type, name, blockEnv); break; case TUPLE_VARIABLE: ((BLangTupleVariable) variable).memberVariables.forEach(memberVariable -> recursivelyDefineVariables(memberVariable, blockEnv)); break; case RECORD_VARIABLE: ((BLangRecordVariable) variable).variableList.forEach(value -> recursivelyDefineVariables(value.valueBindingPattern, blockEnv)); break; } } private boolean checkTypeAndVarCountConsistency(BLangTupleVariable varNode) { return checkTypeAndVarCountConsistency(varNode, null, env); } private boolean checkTypeAndVarCountConsistency(BLangTupleVariable varNode, BTupleType tupleTypeNode, SymbolEnv env) { if (tupleTypeNode == null) { /* This switch block will resolve the tuple type of the tuple variable. For example consider the following - [int, string]|[boolean, float] [a, b] = foo(); Since the varNode type is a union, the types of 'a' and 'b' will be resolved as follows: Type of 'a' will be (int | boolean) while the type of 'b' will be (string | float). Consider anydata (a, b) = foo(); Here, the type of 'a'and type of 'b' will be both anydata. 
*/ switch (varNode.type.tag) { case TypeTags.UNION: Set<BType> unionType = types.expandAndGetMemberTypesRecursive(varNode.type); List<BType> possibleTypes = unionType.stream() .filter(type -> { if (TypeTags.TUPLE == type.tag && (varNode.memberVariables.size() == ((BTupleType) type).tupleTypes.size())) { return true; } return TypeTags.ANY == type.tag || TypeTags.ANYDATA == type.tag; }) .collect(Collectors.toList()); if (possibleTypes.isEmpty()) { dlog.error(varNode.pos, DiagnosticCode.INVALID_TYPE_DEFINITION_FOR_TUPLE_VAR, varNode.type); return false; } if (possibleTypes.size() > 1) { List<BType> memberTupleTypes = new ArrayList<>(); for (int i = 0; i < varNode.memberVariables.size(); i++) { LinkedHashSet<BType> memberTypes = new LinkedHashSet<>(); for (BType possibleType : possibleTypes) { if (possibleType.tag == TypeTags.TUPLE) { memberTypes.add(((BTupleType) possibleType).tupleTypes.get(i)); } else { memberTupleTypes.add(varNode.type); } } if (memberTypes.size() > 1) { memberTupleTypes.add(BUnionType.create(null, memberTypes)); } else { memberTupleTypes.addAll(memberTypes); } } tupleTypeNode = new BTupleType(memberTupleTypes); break; } if (possibleTypes.get(0).tag == TypeTags.TUPLE) { tupleTypeNode = (BTupleType) possibleTypes.get(0); break; } List<BType> memberTypes = new ArrayList<>(); for (int i = 0; i < varNode.memberVariables.size(); i++) { memberTypes.add(possibleTypes.get(0)); } tupleTypeNode = new BTupleType(memberTypes); break; case TypeTags.ANY: case TypeTags.ANYDATA: List<BType> memberTupleTypes = new ArrayList<>(); for (int i = 0; i < varNode.memberVariables.size(); i++) { memberTupleTypes.add(varNode.type); } tupleTypeNode = new BTupleType(memberTupleTypes); if (varNode.restVariable != null) { tupleTypeNode.restType = varNode.type; } break; case TypeTags.TUPLE: tupleTypeNode = (BTupleType) varNode.type; break; default: dlog.error(varNode.pos, DiagnosticCode.INVALID_TYPE_DEFINITION_FOR_TUPLE_VAR, varNode.type); return false; } } if (tupleTypeNode.tupleTypes.size() != varNode.memberVariables.size() || (tupleTypeNode.restType == null && varNode.restVariable != null) || (tupleTypeNode.restType != null && varNode.restVariable == null)) { dlog.error(varNode.pos, DiagnosticCode.INVALID_TUPLE_BINDING_PATTERN); return false; } int ignoredCount = 0; List<BLangVariable> memberVariables = new ArrayList<>(varNode.memberVariables); if (varNode.restVariable != null) { memberVariables.add(varNode.restVariable); } for (int i = 0; i < memberVariables.size(); i++) { BLangVariable var = memberVariables.get(i); BType type = (i <= tupleTypeNode.tupleTypes.size() - 1) ? 
tupleTypeNode.tupleTypes.get(i) : new BArrayType(tupleTypeNode.restType); if (var.getKind() == NodeKind.VARIABLE) { BLangSimpleVariable simpleVar = (BLangSimpleVariable) var; Name varName = names.fromIdNode(simpleVar.name); if (varName == Names.IGNORE) { ignoredCount++; simpleVar.type = symTable.anyType; types.checkType(varNode.pos, type, simpleVar.type, DiagnosticCode.INCOMPATIBLE_TYPES); continue; } } var.type = type; analyzeNode(var, env); } if (!varNode.memberVariables.isEmpty() && ignoredCount == varNode.memberVariables.size() && varNode.restVariable == null) { dlog.error(varNode.pos, DiagnosticCode.NO_NEW_VARIABLES_VAR_ASSIGNMENT); return false; } return true; } private boolean validateRecordVariable(BLangRecordVariable recordVar) { return validateRecordVariable(recordVar, env); } private boolean validateRecordVariable(BLangRecordVariable recordVar, SymbolEnv env) { BRecordType recordVarType; /* This switch block will resolve the record type of the record variable. For example consider the following - type Foo record {int a, boolean b}; type Bar record {string a, float b}; Foo|Bar {a, b} = foo(); Since the varNode type is a union, the types of 'a' and 'b' will be resolved as follows: Type of 'a' will be a union of the types of field 'a' in both Foo and Bar. i.e. type of 'a' is (int | string) and type of 'b' is (boolean | float). Consider anydata {a, b} = foo(); Here, the type of 'a'and type of 'b' will be both anydata. */ switch (recordVar.type.tag) { case TypeTags.UNION: BUnionType unionType = (BUnionType) recordVar.type; Set<BType> bTypes = types.expandAndGetMemberTypesRecursive(unionType); List<BType> possibleTypes = bTypes.stream() .filter(rec -> doesRecordContainKeys(rec, recordVar.variableList, recordVar.restParam != null)) .collect(Collectors.toList()); if (possibleTypes.isEmpty()) { dlog.error(recordVar.pos, DiagnosticCode.INVALID_RECORD_BINDING_PATTERN, recordVar.type); return false; } if (possibleTypes.size() > 1) { BRecordTypeSymbol recordSymbol = Symbols.createRecordSymbol(0, names.fromString(ANONYMOUS_RECORD_NAME), env.enclPkg.symbol.pkgID, null, env.scope.owner); recordVarType = (BRecordType) symTable.recordType; List<BField> fields = populateAndGetPossibleFieldsForRecVar(recordVar, possibleTypes, recordSymbol); if (recordVar.restParam != null) { LinkedHashSet<BType> memberTypes = possibleTypes.stream() .map(possibleType -> { if (possibleType.tag == TypeTags.RECORD) { return ((BRecordType) possibleType).restFieldType; } else if (possibleType.tag == TypeTags.MAP) { return ((BMapType) possibleType).constraint; } else { return possibleType; } }) .collect(Collectors.toCollection(LinkedHashSet::new)); recordVarType.restFieldType = memberTypes.size() > 1 ? 
BUnionType.create(null, memberTypes) : memberTypes.iterator().next(); } recordVarType.tsymbol = recordSymbol; recordVarType.fields = fields; recordSymbol.type = recordVarType; break; } if (possibleTypes.get(0).tag == TypeTags.RECORD) { recordVarType = (BRecordType) possibleTypes.get(0); break; } if (possibleTypes.get(0).tag == TypeTags.MAP) { recordVarType = createSameTypedFieldsRecordType(recordVar, ((BMapType) possibleTypes.get(0)).constraint); break; } recordVarType = createSameTypedFieldsRecordType(recordVar, possibleTypes.get(0)); break; case TypeTags.RECORD: recordVarType = (BRecordType) recordVar.type; break; case TypeTags.MAP: recordVarType = createSameTypedFieldsRecordType(recordVar, ((BMapType) recordVar.type).constraint); break; case TypeTags.ANY: case TypeTags.ANYDATA: recordVarType = createSameTypedFieldsRecordType(recordVar, recordVar.type); break; default: dlog.error(recordVar.pos, DiagnosticCode.INVALID_RECORD_BINDING_PATTERN, recordVar.type); return false; } Map<String, BField> recordVarTypeFields = recordVarType.fields.stream() .collect(Collectors.toMap(field -> field.getName().getValue(), field -> field)); boolean validRecord = true; int ignoredCount = 0; for (BLangRecordVariableKeyValue variable : recordVar.variableList) { if (names.fromIdNode(variable.getKey()) == Names.IGNORE) { dlog.error(recordVar.pos, DiagnosticCode.UNDERSCORE_NOT_ALLOWED); continue; } BLangVariable value = variable.getValue(); if (value.getKind() == NodeKind.VARIABLE) { BLangSimpleVariable simpleVar = (BLangSimpleVariable) value; Name varName = names.fromIdNode(simpleVar.name); if (varName == Names.IGNORE) { ignoredCount++; simpleVar.type = symTable.anyType; if (!recordVarTypeFields.containsKey(variable.getKey().getValue())) { continue; } types.checkType(variable.valueBindingPattern.pos, recordVarTypeFields.get((variable.getKey().getValue())).type, simpleVar.type, DiagnosticCode.INCOMPATIBLE_TYPES); continue; } } if (!recordVarTypeFields.containsKey(variable.getKey().getValue())) { if (recordVarType.sealed) { validRecord = false; dlog.error(recordVar.pos, DiagnosticCode.INVALID_FIELD_IN_RECORD_BINDING_PATTERN, variable.getKey().getValue(), recordVar.type); } else { BType restType; if (recordVarType.restFieldType.tag == TypeTags.ANYDATA || recordVarType.restFieldType.tag == TypeTags.ANY) { restType = recordVarType.restFieldType; } else { restType = BUnionType.create(null, recordVarType.restFieldType, symTable.nilType); } value.type = restType; value.accept(this); } continue; } value.type = recordVarTypeFields.get((variable.getKey().getValue())).type; value.accept(this); } if (!recordVar.variableList.isEmpty() && ignoredCount == recordVar.variableList.size() && recordVar.restParam == null) { dlog.error(recordVar.pos, DiagnosticCode.NO_NEW_VARIABLES_VAR_ASSIGNMENT); return false; } if (recordVar.restParam != null) { ((BLangVariable) recordVar.restParam).type = getRestParamType(recordVarType); symbolEnter.defineNode((BLangNode) recordVar.restParam, env); } return validRecord; } private boolean validateErrorVariable(BLangErrorVariable errorVariable) { BErrorType errorType; switch (errorVariable.type.tag) { case TypeTags.UNION: BUnionType unionType = ((BUnionType) errorVariable.type); List<BErrorType> possibleTypes = unionType.getMemberTypes().stream() .filter(type -> TypeTags.ERROR == type.tag) .map(BErrorType.class::cast) .collect(Collectors.toList()); if (possibleTypes.isEmpty()) { dlog.error(errorVariable.pos, DiagnosticCode.INVALID_ERROR_BINDING_PATTERN, errorVariable.type); return false; } if 
(possibleTypes.size() > 1) { LinkedHashSet<BType> detailType = new LinkedHashSet<>(); for (BErrorType possibleErrType : possibleTypes) { detailType.add(possibleErrType.detailType); } BType errorDetailType = detailType.size() > 1 ? BUnionType.create(null, detailType) : detailType.iterator().next(); errorType = new BErrorType(null, symTable.stringType, errorDetailType); } else { errorType = possibleTypes.get(0); } break; case TypeTags.ERROR: errorType = (BErrorType) errorVariable.type; break; default: dlog.error(errorVariable.pos, DiagnosticCode.INVALID_ERROR_BINDING_PATTERN, errorVariable.type); return false; } errorVariable.type = errorType; boolean isReasonIgnored = false; BLangSimpleVariable reasonVariable = errorVariable.reason; if (Names.IGNORE == names.fromIdNode(reasonVariable.name)) { reasonVariable.type = symTable.noType; isReasonIgnored = true; } else { errorVariable.reason.type = errorType.reasonType; errorVariable.reason.accept(this); } if (errorVariable.detail == null || (errorVariable.detail.isEmpty() && !isRestDetailBindingAvailable(errorVariable))) { if (isReasonIgnored) { dlog.error(errorVariable.pos, DiagnosticCode.NO_NEW_VARIABLES_VAR_ASSIGNMENT); return false; } return true; } if (errorType.detailType.getKind() == TypeKind.RECORD) { BRecordType recordType = (BRecordType) errorType.detailType; Map<String, BField> fieldMap = recordType.fields.stream() .collect(Collectors.toMap(f -> f.name.value, f -> f)); for (BLangErrorVariable.BLangErrorDetailEntry errorDetailEntry : errorVariable.detail) { String entryName = errorDetailEntry.key.getValue(); BField entryField = fieldMap.get(entryName); BLangVariable boundVar = errorDetailEntry.valueBindingPattern; if (entryField != null) { if ((entryField.symbol.flags & Flags.OPTIONAL) == Flags.OPTIONAL) { boundVar.type = BUnionType.create(null, entryField.type, symTable.nilType); } else { boundVar.type = entryField.type; } } else { if (recordType.sealed) { dlog.error(errorVariable.pos, DiagnosticCode.INVALID_ERROR_BINDING_PATTERN, errorVariable.type); boundVar.type = symTable.semanticError; return false; } else { boundVar.type = BUnionType.create(null, recordType.restFieldType, symTable.nilType); } } boolean isIgnoredVar = boundVar.getKind() == NodeKind.VARIABLE && ((BLangSimpleVariable) boundVar).name.value.equals(Names.IGNORE.value); if (!isIgnoredVar) { boundVar.accept(this); } } if (isRestDetailBindingAvailable(errorVariable)) { BTypeSymbol typeSymbol = createTypeSymbol(SymTag.TYPE); BMapType restType = new BMapType(TypeTags.MAP, recordType.restFieldType, typeSymbol); typeSymbol.type = restType; errorVariable.restDetail.type = restType; errorVariable.restDetail.accept(this); } return true; } else if (errorType.detailType.getKind() == TypeKind.UNION) { BErrorTypeSymbol errorTypeSymbol = new BErrorTypeSymbol(SymTag.ERROR, Flags.PUBLIC, Names.ERROR, env.enclPkg.packageID, symTable.errorType, env.scope.owner); errorVariable.type = new BErrorType(errorTypeSymbol, symTable.stringType, symTable.detailType); return validateErrorVariable(errorVariable); } if (isRestDetailBindingAvailable(errorVariable)) { errorVariable.restDetail.type = symTable.detailType; errorVariable.restDetail.accept(this); } return true; } private boolean isRestDetailBindingAvailable(BLangErrorVariable errorVariable) { return errorVariable.restDetail != null && !errorVariable.restDetail.name.value.equals(Names.IGNORE.value); } private BTypeSymbol createTypeSymbol(int type) { return new BTypeSymbol(type, Flags.PUBLIC, Names.EMPTY, env.enclPkg.packageID, null, 
env.scope.owner); } /** * This method will resolve field types based on a list of possible types. * When a record variable has multiple possible assignable types, each field will be a union of the relevant * possible types field type. * * @param recordVar record variable whose fields types are to be resolved * @param possibleTypes list of possible types * @param recordSymbol symbol of the record type to be used in creating fields * @return the list of fields */ private List<BField> populateAndGetPossibleFieldsForRecVar(BLangRecordVariable recordVar, List<BType> possibleTypes, BRecordTypeSymbol recordSymbol) { List<BField> fields = new ArrayList<>(); for (BLangRecordVariableKeyValue bLangRecordVariableKeyValue : recordVar.variableList) { String fieldName = bLangRecordVariableKeyValue.key.value; LinkedHashSet<BType> memberTypes = new LinkedHashSet<>(); for (BType possibleType : possibleTypes) { if (possibleType.tag == TypeTags.RECORD) { BRecordType possibleRecordType = (BRecordType) possibleType; Optional<BField> optionalField = possibleRecordType.fields.stream() .filter(field -> field.getName().getValue().equals(fieldName)) .findFirst(); if (optionalField.isPresent()) { BField bField = optionalField.get(); if (Symbols.isOptional(bField.symbol)) { memberTypes.add(symTable.nilType); } memberTypes.add(bField.type); } else { memberTypes.add(possibleRecordType.restFieldType); memberTypes.add(symTable.nilType); } continue; } if (possibleType.tag == TypeTags.MAP) { BMapType possibleMapType = (BMapType) possibleType; memberTypes.add(possibleMapType.constraint); continue; } memberTypes.add(possibleType); } BType fieldType = memberTypes.size() > 1 ? BUnionType.create(null, memberTypes) : memberTypes.iterator().next(); fields.add(new BField(names.fromString(fieldName), recordVar.pos, new BVarSymbol(0, names.fromString(fieldName), env.enclPkg.symbol.pkgID, fieldType, recordSymbol))); } return fields; }
class SemanticAnalyzer extends BLangNodeVisitor { private static final CompilerContext.Key<SemanticAnalyzer> SYMBOL_ANALYZER_KEY = new CompilerContext.Key<>(); private static final String ANONYMOUS_RECORD_NAME = "anonymous-record"; private static final String NULL_LITERAL = "null"; private static final String LEFT_BRACE = "{"; private static final String RIGHT_BRACE = "}"; private static final String SPACE = " "; public static final String COLON = ":"; private SymbolTable symTable; private SymbolEnter symbolEnter; private Names names; private SymbolResolver symResolver; private TypeChecker typeChecker; private Types types; private StreamsQuerySemanticAnalyzer streamsQuerySemanticAnalyzer; private BLangDiagnosticLog dlog; private TypeNarrower typeNarrower; private ConstantValueResolver constantValueResolver; private SymbolEnv env; private BType expType; private DiagnosticCode diagCode; private BType resType; private Stack<SymbolEnv> prevEnvs = new Stack<>(); public static SemanticAnalyzer getInstance(CompilerContext context) { SemanticAnalyzer semAnalyzer = context.get(SYMBOL_ANALYZER_KEY); if (semAnalyzer == null) { semAnalyzer = new SemanticAnalyzer(context); } return semAnalyzer; } public SemanticAnalyzer(CompilerContext context) { context.put(SYMBOL_ANALYZER_KEY, this); this.symTable = SymbolTable.getInstance(context); this.symbolEnter = SymbolEnter.getInstance(context); this.names = Names.getInstance(context); this.symResolver = SymbolResolver.getInstance(context); this.typeChecker = TypeChecker.getInstance(context); this.types = Types.getInstance(context); this.streamsQuerySemanticAnalyzer = StreamsQuerySemanticAnalyzer.getInstance(context); this.dlog = BLangDiagnosticLog.getInstance(context); this.typeNarrower = TypeNarrower.getInstance(context); this.constantValueResolver = ConstantValueResolver.getInstance(context); } public BLangPackage analyze(BLangPackage pkgNode) { pkgNode.accept(this); return pkgNode; } public void visit(BLangPackage pkgNode) { if (pkgNode.completedPhases.contains(CompilerPhase.TYPE_CHECK)) { return; } SymbolEnv pkgEnv = this.symTable.pkgEnvMap.get(pkgNode.symbol); pkgNode.topLevelNodes.stream().filter(pkgLevelNode -> pkgLevelNode.getKind() == NodeKind.CONSTANT) .forEach(constant -> analyzeDef((BLangNode) constant, pkgEnv)); this.constantValueResolver.resolve(pkgNode.constants); pkgNode.topLevelNodes.stream().filter(pkgLevelNode -> pkgLevelNode.getKind() != NodeKind.CONSTANT) .filter(pkgLevelNode -> !(pkgLevelNode.getKind() == NodeKind.FUNCTION && ((BLangFunction) pkgLevelNode).flagSet.contains(Flag.LAMBDA))) .forEach(topLevelNode -> analyzeDef((BLangNode) topLevelNode, pkgEnv)); while (pkgNode.lambdaFunctions.peek() != null) { BLangLambdaFunction lambdaFunction = pkgNode.lambdaFunctions.poll(); BLangFunction function = lambdaFunction.function; lambdaFunction.type = function.symbol.type; analyzeDef(lambdaFunction.function, lambdaFunction.cachedEnv); } pkgNode.getTestablePkgs().forEach(testablePackage -> visit((BLangPackage) testablePackage)); pkgNode.completedPhases.add(CompilerPhase.TYPE_CHECK); } public void visit(BLangXMLNS xmlnsNode) { xmlnsNode.type = symTable.stringType; if (xmlnsNode.symbol == null) { symbolEnter.defineNode(xmlnsNode, env); } typeChecker.checkExpr(xmlnsNode.namespaceURI, env, symTable.stringType); } public void visit(BLangXMLNSStatement xmlnsStmtNode) { analyzeNode(xmlnsStmtNode.xmlnsDecl, env); } public void visit(BLangFunction funcNode) { SymbolEnv funcEnv = SymbolEnv.createFunctionEnv(funcNode, funcNode.symbol.scope, env); 
funcNode.symbol.params.forEach(param -> param.flags |= Flags.FUNCTION_FINAL); funcNode.annAttachments.forEach(annotationAttachment -> { if (Symbols.isFlagOn(funcNode.symbol.flags, Flags.RESOURCE)) { annotationAttachment.attachPoints.add(AttachPoint.Point.RESOURCE); } else if (funcNode.attachedFunction) { annotationAttachment.attachPoints.add(AttachPoint.Point.OBJECT_METHOD); } annotationAttachment.attachPoints.add(AttachPoint.Point.FUNCTION); this.analyzeDef(annotationAttachment, funcEnv); }); validateAnnotationAttachmentCount(funcNode.annAttachments); if (funcNode.returnTypeNode != null) { funcNode.returnTypeAnnAttachments.forEach(annotationAttachment -> { annotationAttachment.attachPoints.add(AttachPoint.Point.RETURN); this.analyzeDef(annotationAttachment, funcEnv); }); validateAnnotationAttachmentCount(funcNode.returnTypeAnnAttachments); } if (Symbols.isNative(funcNode.symbol)) { funcNode.externalAnnAttachments.forEach(annotationAttachment -> { annotationAttachment.attachPoints.add(AttachPoint.Point.EXTERNAL); this.analyzeDef(annotationAttachment, funcEnv); }); validateAnnotationAttachmentCount(funcNode.externalAnnAttachments); } for (BLangSimpleVariable param : funcNode.requiredParams) { symbolEnter.defineExistingVarSymbolInEnv(param.symbol, funcNode.clonedEnv); this.analyzeDef(param, funcNode.clonedEnv); } if (funcNode.restParam != null) { symbolEnter.defineExistingVarSymbolInEnv(funcNode.restParam.symbol, funcNode.clonedEnv); this.analyzeDef(funcNode.restParam, funcNode.clonedEnv); } validateObjectAttachedFunction(funcNode); if (Symbols.isNative(funcNode.symbol) || funcNode.interfaceFunction) { if (funcNode.body != null) { dlog.error(funcNode.pos, DiagnosticCode.EXTERN_FUNCTION_CANNOT_HAVE_BODY, funcNode.name); } return; } if (funcNode.body != null) { analyzeStmt(funcNode.body, funcEnv); } this.processWorkers(funcNode, funcEnv); } private void processWorkers(BLangInvokableNode invNode, SymbolEnv invEnv) { if (invNode.workers.size() > 0) { invEnv.scope.entries.putAll(invNode.body.scope.entries); invNode.workers.forEach(e -> this.symbolEnter.defineNode(e, invEnv)); invNode.workers.forEach(e -> analyzeNode(e, invEnv)); } } @Override public void visit(BLangTypeDefinition typeDefinition) { if (typeDefinition.typeNode.getKind() == NodeKind.OBJECT_TYPE || typeDefinition.typeNode.getKind() == NodeKind.RECORD_TYPE || typeDefinition.typeNode.getKind() == NodeKind.ERROR_TYPE || typeDefinition.typeNode.getKind() == NodeKind.FINITE_TYPE_NODE) { analyzeDef(typeDefinition.typeNode, env); } typeDefinition.annAttachments.forEach(annotationAttachment -> { if (typeDefinition.typeNode.getKind() == NodeKind.OBJECT_TYPE) { annotationAttachment.attachPoints.add(AttachPoint.Point.OBJECT); } annotationAttachment.attachPoints.add(AttachPoint.Point.TYPE); annotationAttachment.accept(this); }); validateAnnotationAttachmentCount(typeDefinition.annAttachments); } public void visit(BLangTypeConversionExpr conversionExpr) { conversionExpr.annAttachments.forEach(annotationAttachment -> { annotationAttachment.attachPoints.add(AttachPoint.Point.TYPE); if (conversionExpr.typeNode.getKind() == NodeKind.OBJECT_TYPE) { annotationAttachment.attachPoints.add(AttachPoint.Point.OBJECT); } annotationAttachment.accept(this); }); validateAnnotationAttachmentCount(conversionExpr.annAttachments); } @Override public void visit(BLangFiniteTypeNode finiteTypeNode) { finiteTypeNode.valueSpace.forEach(val -> { if (val.type.tag == TypeTags.NIL && NULL_LITERAL.equals(((BLangLiteral) val).originalValue)) { dlog.error(val.pos, 
DiagnosticCode.INVALID_USE_OF_NULL_LITERAL); } }); } @Override public void visit(BLangObjectTypeNode objectTypeNode) { SymbolEnv objectEnv = SymbolEnv.createTypeEnv(objectTypeNode, objectTypeNode.symbol.scope, env); boolean isAbstract = objectTypeNode.flagSet.contains(Flag.ABSTRACT); objectTypeNode.fields.forEach(field -> { analyzeDef(field, objectEnv); if (isAbstract && field.flagSet.contains(Flag.PRIVATE)) { this.dlog.error(field.pos, DiagnosticCode.PRIVATE_FIELD_ABSTRACT_OBJECT, field.symbol.name); } }); objectTypeNode.functions.forEach(func -> { analyzeDef(func, env); if (isAbstract && func.flagSet.contains(Flag.PRIVATE)) { this.dlog.error(func.pos, DiagnosticCode.PRIVATE_FUNC_ABSTRACT_OBJECT, func.name, objectTypeNode.symbol.name); } if (isAbstract && func.flagSet.contains(Flag.NATIVE)) { this.dlog.error(func.pos, DiagnosticCode.EXTERN_FUNC_ABSTRACT_OBJECT, func.name, objectTypeNode.symbol.name); } if (func.flagSet.contains(Flag.RESOURCE) && func.flagSet.contains(Flag.NATIVE)) { this.dlog.error(func.pos, DiagnosticCode.RESOURCE_FUNCTION_CANNOT_BE_EXTERN, func.name); } }); ((BObjectTypeSymbol) objectTypeNode.symbol).referencedFunctions .forEach(func -> validateReferencedFunction(objectTypeNode.pos, func, env)); if (objectTypeNode.initFunction == null) { return; } if (objectTypeNode.initFunction.flagSet.contains(Flag.PRIVATE)) { this.dlog.error(objectTypeNode.initFunction.pos, DiagnosticCode.PRIVATE_OBJECT_CONSTRUCTOR, objectTypeNode.symbol.name); return; } if (objectTypeNode.flagSet.contains(Flag.ABSTRACT)) { this.dlog.error(objectTypeNode.initFunction.pos, DiagnosticCode.ABSTRACT_OBJECT_CONSTRUCTOR, objectTypeNode.symbol.name); return; } if (objectTypeNode.initFunction.flagSet.contains(Flag.NATIVE)) { this.dlog.error(objectTypeNode.initFunction.pos, DiagnosticCode.OBJECT_INIT_FUNCTION_CANNOT_BE_EXTERN, objectTypeNode.symbol.name); return; } analyzeDef(objectTypeNode.initFunction, env); } @Override public void visit(BLangRecordTypeNode recordTypeNode) { SymbolEnv recordEnv = SymbolEnv.createTypeEnv(recordTypeNode, recordTypeNode.symbol.scope, env); recordTypeNode.fields.forEach(field -> analyzeDef(field, recordEnv)); analyzeDef(recordTypeNode.initFunction, recordEnv); validateDefaultable(recordTypeNode); } @Override public void visit(BLangErrorType errorType) { BType reasonType = getReasonType(errorType); if (!types.isAssignable(reasonType, symTable.stringType)) { dlog.error(errorType.reasonType.pos, DiagnosticCode.INVALID_ERROR_REASON_TYPE, reasonType); } else if (errorType.reasonType != null) { validateModuleQualifiedReasons(errorType.reasonType.pos, reasonType); } if (errorType.detailType == null) { return; } BType detailType = errorType.detailType.type; if (!types.isValidErrorDetailType(detailType)) { dlog.error(errorType.detailType.pos, DiagnosticCode.INVALID_ERROR_DETAIL_TYPE, detailType, symTable.detailType); } } private BType getReasonType(BLangErrorType errorType) { if (errorType.reasonType == null) { return symTable.stringType; } return errorType.reasonType.type; } private void validateModuleQualifiedReasons(DiagnosticPos pos, BType reasonType) { switch (reasonType.tag) { case TypeTags.STRING: return; case TypeTags.FINITE: BFiniteType finiteType = (BFiniteType) reasonType; for (BLangExpression expr : finiteType.valueSpace) { validateModuleQualifiedReason(pos, (String) ((BLangLiteral) expr).value); } return; case TypeTags.UNION: ((BUnionType) reasonType).getMemberTypes().forEach(type -> validateModuleQualifiedReasons(pos, type)); } } private void 
validateModuleQualifiedReason(DiagnosticPos pos, String reason) { if (!reason.startsWith(LEFT_BRACE)) { return; } PackageID currentPackageId = env.enclPkg.packageID; if (currentPackageId.isUnnamed || reason.contains(SPACE) || !reason.startsWith(LEFT_BRACE.concat(currentPackageId.toString().split(COLON)[0]) .concat(RIGHT_BRACE))) { dlog.warning(pos, DiagnosticCode.NON_MODULE_QUALIFIED_ERROR_REASON, reason); } } public void visit(BLangAnnotation annotationNode) { annotationNode.annAttachments.forEach(annotationAttachment -> { annotationAttachment.attachPoints.add(AttachPoint.Point.ANNOTATION); annotationAttachment.accept(this); }); validateAnnotationAttachmentCount(annotationNode.annAttachments); } public void visit(BLangAnnotationAttachment annAttachmentNode) { BSymbol symbol = this.symResolver.resolveAnnotation(annAttachmentNode.pos, env, names.fromString(annAttachmentNode.pkgAlias.getValue()), names.fromString(annAttachmentNode.getAnnotationName().getValue())); if (symbol == this.symTable.notFoundSymbol) { this.dlog.error(annAttachmentNode.pos, DiagnosticCode.UNDEFINED_ANNOTATION, annAttachmentNode.getAnnotationName().getValue()); return; } BAnnotationSymbol annotationSymbol = (BAnnotationSymbol) symbol; annAttachmentNode.annotationSymbol = annotationSymbol; if (annotationSymbol.maskedPoints > 0 && !Symbols.isAttachPointPresent(annotationSymbol.maskedPoints, AttachPoints.asMask(annAttachmentNode.attachPoints))) { String msg = annAttachmentNode.attachPoints.stream() .map(point -> point.name().toLowerCase()) .collect(Collectors.joining(", ")); this.dlog.error(annAttachmentNode.pos, DiagnosticCode.ANNOTATION_NOT_ALLOWED, annotationSymbol, msg); } validateAnnotationAttachmentExpr(annAttachmentNode, annotationSymbol); } public void visit(BLangSimpleVariable varNode) { if (varNode.isDeclaredWithVar) { validateWorkerAnnAttachments(varNode.expr); handleDeclaredWithVar(varNode); return; } int ownerSymTag = env.scope.owner.tag; if ((ownerSymTag & SymTag.INVOKABLE) == SymTag.INVOKABLE) { if (varNode.symbol == null) { symbolEnter.defineNode(varNode, env); varNode.annAttachments.forEach(annotationAttachment -> { annotationAttachment.attachPoints.add(AttachPoint.Point.VAR); annotationAttachment.accept(this); }); } else { varNode.annAttachments.forEach(annotationAttachment -> { annotationAttachment.attachPoints.add(AttachPoint.Point.PARAMETER); annotationAttachment.accept(this); }); } } else { varNode.annAttachments.forEach(annotationAttachment -> { if (Symbols.isFlagOn(varNode.symbol.flags, Flags.LISTENER)) { annotationAttachment.attachPoints.add(AttachPoint.Point.LISTENER); } else if (Symbols.isFlagOn(varNode.symbol.flags, Flags.SERVICE)) { annotationAttachment.attachPoints.add(AttachPoint.Point.SERVICE); } else { annotationAttachment.attachPoints.add(AttachPoint.Point.VAR); } annotationAttachment.accept(this); }); } validateAnnotationAttachmentCount(varNode.annAttachments); validateWorkerAnnAttachments(varNode.expr); if (varNode.name.value.equals(Names.IGNORE.value)) { varNode.symbol = new BVarSymbol(0, Names.IGNORE, env.enclPkg.packageID, symTable.anyType, env.scope.owner); } BType lhsType = varNode.symbol.type; varNode.type = lhsType; BLangExpression rhsExpr = varNode.expr; if (rhsExpr == null) { if (lhsType.tag == TypeTags.ARRAY && typeChecker.isArrayOpenSealedType((BArrayType) lhsType)) { dlog.error(varNode.pos, DiagnosticCode.SEALED_ARRAY_TYPE_NOT_INITIALIZED); } return; } SymbolEnv varInitEnv = SymbolEnv.createVarInitEnv(varNode, env, varNode.symbol); typeChecker.checkExpr(rhsExpr, varInitEnv, 
lhsType); if (Symbols.isFlagOn(varNode.symbol.flags, Flags.LISTENER) && !types.checkListenerCompatibility(varNode.symbol.type)) { dlog.error(varNode.pos, DiagnosticCode.INVALID_LISTENER_VARIABLE, varNode.name); } } /** * Validate annotation attachment of the `start` action or workers. * * @param expr expression to be validated. */ private void validateWorkerAnnAttachments(BLangExpression expr) { if (expr != null && expr.getKind() == NodeKind.INVOCATION && ((BLangInvocation) expr).async) { ((BLangInvocation) expr).annAttachments.forEach(annotationAttachment -> { annotationAttachment.attachPoints.add(AttachPoint.Point.WORKER); annotationAttachment.accept(this); }); validateAnnotationAttachmentCount(((BLangInvocation) expr).annAttachments); } } public void visit(BLangRecordVariable varNode) { if (varNode.isDeclaredWithVar) { handleDeclaredWithVar(varNode); return; } if (varNode.type == null) { varNode.type = symResolver.resolveTypeNode(varNode.typeNode, env); } if (!validateRecordVariable(varNode)) { varNode.type = symTable.semanticError; return; } symbolEnter.defineNode(varNode, env); if (varNode.expr == null) { return; } typeChecker.checkExpr(varNode.expr, env, varNode.type); } public void visit(BLangTupleVariable varNode) { if (varNode.isDeclaredWithVar) { expType = resolveTupleType(varNode); handleDeclaredWithVar(varNode); return; } if (varNode.type == null) { varNode.type = symResolver.resolveTypeNode(varNode.typeNode, env); } if (!(checkTypeAndVarCountConsistency(varNode))) { varNode.type = symTable.semanticError; return; } symbolEnter.defineNode(varNode, env); if (varNode.expr == null) { return; } typeChecker.checkExpr(varNode.expr, env, varNode.type); } private BType resolveTupleType(BLangTupleVariable varNode) { List<BType> memberTypes = new ArrayList<>(varNode.memberVariables.size()); for (BLangVariable memberVariable : varNode.memberVariables) { if (memberVariable.getKind() == NodeKind.TUPLE_VARIABLE) { memberTypes.add(resolveTupleType((BLangTupleVariable) memberVariable)); } else { memberTypes.add(symTable.noType); } } return new BTupleType(memberTypes); } public void visit(BLangErrorVariable varNode) { if (varNode.isDeclaredWithVar) { handleDeclaredWithVar(varNode); return; } if (varNode.type == null) { varNode.type = symResolver.resolveTypeNode(varNode.typeNode, env); } if (!varNode.reasonVarPrefixAvailable && varNode.type == null) { BErrorType errorType = new BErrorType(varNode.type.tsymbol, null, null); if (varNode.type.tag == TypeTags.UNION) { Set<BType> members = types.expandAndGetMemberTypesRecursive(varNode.type); List<BErrorType> errorMembers = members.stream() .filter(m -> m.tag == TypeTags.ERROR) .map(m -> (BErrorType) m) .collect(Collectors.toList()); if (errorMembers.isEmpty()) { dlog.error(varNode.pos, DiagnosticCode.INVALID_ERROR_MATCH_PATTERN); return; } else if (errorMembers.size() == 1) { errorType.detailType = errorMembers.get(0).detailType; errorType.reasonType = errorMembers.get(0).reasonType; } else { errorType.detailType = symTable.detailType; errorType.reasonType = symTable.stringType; } varNode.type = errorType; } else if (varNode.type.tag == TypeTags.ERROR) { errorType.detailType = ((BErrorType) varNode.type).detailType; } if (varNode.reasonMatchConst != null) { BTypeSymbol reasonConstTypeSymbol = new BTypeSymbol(SymTag.FINITE_TYPE, Flags.PUBLIC, names.fromString(""), this.env.enclPkg.packageID, null, this.env.scope.owner); varNode.reasonMatchConst.type = symTable.stringType; typeChecker.checkExpr(varNode.reasonMatchConst, env); 
LinkedHashSet<BLangExpression> members = new LinkedHashSet<>(); members.add(varNode.reasonMatchConst); errorType.reasonType = new BFiniteType(reasonConstTypeSymbol, members); } else { errorType.reasonType = symTable.stringType; } } if (!validateErrorVariable(varNode)) { varNode.type = symTable.semanticError; return; } symbolEnter.defineNode(varNode, env); if (varNode.expr == null) { return; } typeChecker.checkExpr(varNode.expr, env, varNode.type); } private void handleDeclaredWithVar(BLangVariable variable) { BLangExpression varRefExpr = variable.expr; BType rhsType = typeChecker.checkExpr(varRefExpr, this.env, expType); switch (variable.getKind()) { case VARIABLE: if (!validateVariableDefinition(varRefExpr)) { rhsType = symTable.semanticError; } BLangSimpleVariable simpleVariable = (BLangSimpleVariable) variable; Name varName = names.fromIdNode(simpleVariable.name); if (varName == Names.IGNORE) { dlog.error(simpleVariable.pos, DiagnosticCode.NO_NEW_VARIABLES_VAR_ASSIGNMENT); return; } simpleVariable.type = rhsType; int ownerSymTag = env.scope.owner.tag; if ((ownerSymTag & SymTag.INVOKABLE) == SymTag.INVOKABLE) { if (simpleVariable.symbol == null) { symbolEnter.defineNode(simpleVariable, env); } } simpleVariable.symbol.type = rhsType; break; case TUPLE_VARIABLE: if (variable.isDeclaredWithVar && variable.expr.getKind() == NodeKind.LIST_CONSTRUCTOR_EXPR) { dlog.error(varRefExpr.pos, DiagnosticCode.INVALID_LITERAL_FOR_TYPE, "tuple binding pattern"); variable.type = symTable.semanticError; return; } if (TypeTags.TUPLE != rhsType.tag) { dlog.error(varRefExpr.pos, DiagnosticCode.INVALID_TYPE_DEFINITION_FOR_TUPLE_VAR, rhsType); variable.type = symTable.semanticError; return; } BLangTupleVariable tupleVariable = (BLangTupleVariable) variable; tupleVariable.type = rhsType; if (!(checkTypeAndVarCountConsistency(tupleVariable))) { tupleVariable.type = symTable.semanticError; return; } symbolEnter.defineNode(tupleVariable, env); break; case RECORD_VARIABLE: if (TypeTags.RECORD != rhsType.tag && TypeTags.MAP != rhsType.tag && TypeTags.JSON != rhsType.tag) { dlog.error(varRefExpr.pos, DiagnosticCode.INVALID_TYPE_DEFINITION_FOR_RECORD_VAR, rhsType); variable.type = symTable.semanticError; } BLangRecordVariable recordVariable = (BLangRecordVariable) variable; recordVariable.type = rhsType; if (!validateRecordVariable(recordVariable)) { recordVariable.type = symTable.semanticError; } break; case ERROR_VARIABLE: if (TypeTags.ERROR != rhsType.tag) { dlog.error(variable.expr.pos, DiagnosticCode.INVALID_TYPE_DEFINITION_FOR_ERROR_VAR, rhsType); variable.type = symTable.semanticError; return; } BLangErrorVariable errorVariable = (BLangErrorVariable) variable; errorVariable.type = rhsType; if (!validateErrorVariable(errorVariable)) { errorVariable.type = symTable.semanticError; return; } symbolEnter.defineNode(errorVariable, env); break; } } private void handleDeclaredVarInForeach(BLangVariable variable, BType rhsType, SymbolEnv blockEnv) { switch (variable.getKind()) { case VARIABLE: BLangSimpleVariable simpleVariable = (BLangSimpleVariable) variable; Name varName = names.fromIdNode(simpleVariable.name); if (varName == Names.IGNORE) { dlog.error(simpleVariable.pos, DiagnosticCode.UNDERSCORE_NOT_ALLOWED); return; } simpleVariable.type = rhsType; int ownerSymTag = blockEnv.scope.owner.tag; if ((ownerSymTag & SymTag.INVOKABLE) == SymTag.INVOKABLE) { if (simpleVariable.symbol == null) { symbolEnter.defineNode(simpleVariable, blockEnv); } } recursivelySetFinalFlag(simpleVariable); break; case TUPLE_VARIABLE: 
BLangTupleVariable tupleVariable = (BLangTupleVariable) variable; if (TypeTags.TUPLE != rhsType.tag && TypeTags.UNION != rhsType.tag) { dlog.error(variable.pos, DiagnosticCode.INVALID_TYPE_DEFINITION_FOR_TUPLE_VAR, rhsType); recursivelyDefineVariables(tupleVariable, blockEnv); return; } tupleVariable.type = rhsType; if (rhsType.tag == TypeTags.TUPLE && !(checkTypeAndVarCountConsistency(tupleVariable, (BTupleType) tupleVariable.type, blockEnv))) { return; } if (rhsType.tag == TypeTags.UNION && !(checkTypeAndVarCountConsistency(tupleVariable, null, blockEnv))) { return; } symbolEnter.defineNode(tupleVariable, blockEnv); recursivelySetFinalFlag(tupleVariable); break; case RECORD_VARIABLE: BLangRecordVariable recordVariable = (BLangRecordVariable) variable; recordVariable.type = rhsType; validateRecordVariable(recordVariable, blockEnv); recursivelySetFinalFlag(recordVariable); break; case ERROR_VARIABLE: BLangErrorVariable errorVariable = (BLangErrorVariable) variable; if (TypeTags.ERROR != rhsType.tag) { dlog.error(variable.pos, DiagnosticCode.INVALID_TYPE_DEFINITION_FOR_ERROR_VAR, rhsType); recursivelyDefineVariables(errorVariable, blockEnv); return; } errorVariable.type = rhsType; validateErrorVariable(errorVariable); recursivelySetFinalFlag(errorVariable); break; } } private void recursivelyDefineVariables(BLangVariable variable, SymbolEnv blockEnv) { switch (variable.getKind()) { case VARIABLE: Name name = names.fromIdNode(((BLangSimpleVariable) variable).name); if (name == Names.IGNORE) { return; } variable.type = symTable.semanticError; symbolEnter.defineVarSymbol(variable.pos, variable.flagSet, variable.type, name, blockEnv); break; case TUPLE_VARIABLE: ((BLangTupleVariable) variable).memberVariables.forEach(memberVariable -> recursivelyDefineVariables(memberVariable, blockEnv)); break; case RECORD_VARIABLE: ((BLangRecordVariable) variable).variableList.forEach(value -> recursivelyDefineVariables(value.valueBindingPattern, blockEnv)); break; } } private boolean checkTypeAndVarCountConsistency(BLangTupleVariable varNode) { return checkTypeAndVarCountConsistency(varNode, null, env); } private boolean checkTypeAndVarCountConsistency(BLangTupleVariable varNode, BTupleType tupleTypeNode, SymbolEnv env) { if (tupleTypeNode == null) { /* This switch block will resolve the tuple type of the tuple variable. For example consider the following - [int, string]|[boolean, float] [a, b] = foo(); Since the varNode type is a union, the types of 'a' and 'b' will be resolved as follows: Type of 'a' will be (int | boolean) while the type of 'b' will be (string | float). Consider anydata (a, b) = foo(); Here, the type of 'a'and type of 'b' will be both anydata. 
*/ switch (varNode.type.tag) { case TypeTags.UNION: Set<BType> unionType = types.expandAndGetMemberTypesRecursive(varNode.type); List<BType> possibleTypes = unionType.stream() .filter(type -> { if (TypeTags.TUPLE == type.tag && (varNode.memberVariables.size() == ((BTupleType) type).tupleTypes.size())) { return true; } return TypeTags.ANY == type.tag || TypeTags.ANYDATA == type.tag; }) .collect(Collectors.toList()); if (possibleTypes.isEmpty()) { dlog.error(varNode.pos, DiagnosticCode.INVALID_TYPE_DEFINITION_FOR_TUPLE_VAR, varNode.type); return false; } if (possibleTypes.size() > 1) { List<BType> memberTupleTypes = new ArrayList<>(); for (int i = 0; i < varNode.memberVariables.size(); i++) { LinkedHashSet<BType> memberTypes = new LinkedHashSet<>(); for (BType possibleType : possibleTypes) { if (possibleType.tag == TypeTags.TUPLE) { memberTypes.add(((BTupleType) possibleType).tupleTypes.get(i)); } else { memberTupleTypes.add(varNode.type); } } if (memberTypes.size() > 1) { memberTupleTypes.add(BUnionType.create(null, memberTypes)); } else { memberTupleTypes.addAll(memberTypes); } } tupleTypeNode = new BTupleType(memberTupleTypes); break; } if (possibleTypes.get(0).tag == TypeTags.TUPLE) { tupleTypeNode = (BTupleType) possibleTypes.get(0); break; } List<BType> memberTypes = new ArrayList<>(); for (int i = 0; i < varNode.memberVariables.size(); i++) { memberTypes.add(possibleTypes.get(0)); } tupleTypeNode = new BTupleType(memberTypes); break; case TypeTags.ANY: case TypeTags.ANYDATA: List<BType> memberTupleTypes = new ArrayList<>(); for (int i = 0; i < varNode.memberVariables.size(); i++) { memberTupleTypes.add(varNode.type); } tupleTypeNode = new BTupleType(memberTupleTypes); if (varNode.restVariable != null) { tupleTypeNode.restType = varNode.type; } break; case TypeTags.TUPLE: tupleTypeNode = (BTupleType) varNode.type; break; default: dlog.error(varNode.pos, DiagnosticCode.INVALID_TYPE_DEFINITION_FOR_TUPLE_VAR, varNode.type); return false; } } if (tupleTypeNode.tupleTypes.size() != varNode.memberVariables.size() || (tupleTypeNode.restType == null && varNode.restVariable != null) || (tupleTypeNode.restType != null && varNode.restVariable == null)) { dlog.error(varNode.pos, DiagnosticCode.INVALID_TUPLE_BINDING_PATTERN); return false; } int ignoredCount = 0; List<BLangVariable> memberVariables = new ArrayList<>(varNode.memberVariables); if (varNode.restVariable != null) { memberVariables.add(varNode.restVariable); } for (int i = 0; i < memberVariables.size(); i++) { BLangVariable var = memberVariables.get(i); BType type = (i <= tupleTypeNode.tupleTypes.size() - 1) ? 
tupleTypeNode.tupleTypes.get(i) : new BArrayType(tupleTypeNode.restType); if (var.getKind() == NodeKind.VARIABLE) { BLangSimpleVariable simpleVar = (BLangSimpleVariable) var; Name varName = names.fromIdNode(simpleVar.name); if (varName == Names.IGNORE) { ignoredCount++; simpleVar.type = symTable.anyType; types.checkType(varNode.pos, type, simpleVar.type, DiagnosticCode.INCOMPATIBLE_TYPES); continue; } } var.type = type; analyzeNode(var, env); } if (!varNode.memberVariables.isEmpty() && ignoredCount == varNode.memberVariables.size() && varNode.restVariable == null) { dlog.error(varNode.pos, DiagnosticCode.NO_NEW_VARIABLES_VAR_ASSIGNMENT); return false; } return true; } private boolean validateRecordVariable(BLangRecordVariable recordVar) { return validateRecordVariable(recordVar, env); } private boolean validateRecordVariable(BLangRecordVariable recordVar, SymbolEnv env) { BRecordType recordVarType; /* This switch block will resolve the record type of the record variable. For example consider the following - type Foo record {int a, boolean b}; type Bar record {string a, float b}; Foo|Bar {a, b} = foo(); Since the varNode type is a union, the types of 'a' and 'b' will be resolved as follows: Type of 'a' will be a union of the types of field 'a' in both Foo and Bar. i.e. type of 'a' is (int | string) and type of 'b' is (boolean | float). Consider anydata {a, b} = foo(); Here, the type of 'a'and type of 'b' will be both anydata. */ switch (recordVar.type.tag) { case TypeTags.UNION: BUnionType unionType = (BUnionType) recordVar.type; Set<BType> bTypes = types.expandAndGetMemberTypesRecursive(unionType); List<BType> possibleTypes = bTypes.stream() .filter(rec -> doesRecordContainKeys(rec, recordVar.variableList, recordVar.restParam != null)) .collect(Collectors.toList()); if (possibleTypes.isEmpty()) { dlog.error(recordVar.pos, DiagnosticCode.INVALID_RECORD_BINDING_PATTERN, recordVar.type); return false; } if (possibleTypes.size() > 1) { BRecordTypeSymbol recordSymbol = Symbols.createRecordSymbol(0, names.fromString(ANONYMOUS_RECORD_NAME), env.enclPkg.symbol.pkgID, null, env.scope.owner); recordVarType = (BRecordType) symTable.recordType; List<BField> fields = populateAndGetPossibleFieldsForRecVar(recordVar, possibleTypes, recordSymbol); if (recordVar.restParam != null) { LinkedHashSet<BType> memberTypes = possibleTypes.stream() .map(possibleType -> { if (possibleType.tag == TypeTags.RECORD) { return ((BRecordType) possibleType).restFieldType; } else if (possibleType.tag == TypeTags.MAP) { return ((BMapType) possibleType).constraint; } else { return possibleType; } }) .collect(Collectors.toCollection(LinkedHashSet::new)); recordVarType.restFieldType = memberTypes.size() > 1 ? 
BUnionType.create(null, memberTypes) : memberTypes.iterator().next(); } recordVarType.tsymbol = recordSymbol; recordVarType.fields = fields; recordSymbol.type = recordVarType; break; } if (possibleTypes.get(0).tag == TypeTags.RECORD) { recordVarType = (BRecordType) possibleTypes.get(0); break; } if (possibleTypes.get(0).tag == TypeTags.MAP) { recordVarType = createSameTypedFieldsRecordType(recordVar, ((BMapType) possibleTypes.get(0)).constraint); break; } recordVarType = createSameTypedFieldsRecordType(recordVar, possibleTypes.get(0)); break; case TypeTags.RECORD: recordVarType = (BRecordType) recordVar.type; break; case TypeTags.MAP: recordVarType = createSameTypedFieldsRecordType(recordVar, ((BMapType) recordVar.type).constraint); break; case TypeTags.ANY: case TypeTags.ANYDATA: recordVarType = createSameTypedFieldsRecordType(recordVar, recordVar.type); break; default: dlog.error(recordVar.pos, DiagnosticCode.INVALID_RECORD_BINDING_PATTERN, recordVar.type); return false; } Map<String, BField> recordVarTypeFields = recordVarType.fields.stream() .collect(Collectors.toMap(field -> field.getName().getValue(), field -> field)); boolean validRecord = true; int ignoredCount = 0; for (BLangRecordVariableKeyValue variable : recordVar.variableList) { if (names.fromIdNode(variable.getKey()) == Names.IGNORE) { dlog.error(recordVar.pos, DiagnosticCode.UNDERSCORE_NOT_ALLOWED); continue; } BLangVariable value = variable.getValue(); if (value.getKind() == NodeKind.VARIABLE) { BLangSimpleVariable simpleVar = (BLangSimpleVariable) value; Name varName = names.fromIdNode(simpleVar.name); if (varName == Names.IGNORE) { ignoredCount++; simpleVar.type = symTable.anyType; if (!recordVarTypeFields.containsKey(variable.getKey().getValue())) { continue; } types.checkType(variable.valueBindingPattern.pos, recordVarTypeFields.get((variable.getKey().getValue())).type, simpleVar.type, DiagnosticCode.INCOMPATIBLE_TYPES); continue; } } if (!recordVarTypeFields.containsKey(variable.getKey().getValue())) { if (recordVarType.sealed) { validRecord = false; dlog.error(recordVar.pos, DiagnosticCode.INVALID_FIELD_IN_RECORD_BINDING_PATTERN, variable.getKey().getValue(), recordVar.type); } else { BType restType; if (recordVarType.restFieldType.tag == TypeTags.ANYDATA || recordVarType.restFieldType.tag == TypeTags.ANY) { restType = recordVarType.restFieldType; } else { restType = BUnionType.create(null, recordVarType.restFieldType, symTable.nilType); } value.type = restType; value.accept(this); } continue; } value.type = recordVarTypeFields.get((variable.getKey().getValue())).type; value.accept(this); } if (!recordVar.variableList.isEmpty() && ignoredCount == recordVar.variableList.size() && recordVar.restParam == null) { dlog.error(recordVar.pos, DiagnosticCode.NO_NEW_VARIABLES_VAR_ASSIGNMENT); return false; } if (recordVar.restParam != null) { ((BLangVariable) recordVar.restParam).type = getRestParamType(recordVarType); symbolEnter.defineNode((BLangNode) recordVar.restParam, env); } return validRecord; } private boolean validateErrorVariable(BLangErrorVariable errorVariable) { BErrorType errorType; switch (errorVariable.type.tag) { case TypeTags.UNION: BUnionType unionType = ((BUnionType) errorVariable.type); List<BErrorType> possibleTypes = unionType.getMemberTypes().stream() .filter(type -> TypeTags.ERROR == type.tag) .map(BErrorType.class::cast) .collect(Collectors.toList()); if (possibleTypes.isEmpty()) { dlog.error(errorVariable.pos, DiagnosticCode.INVALID_ERROR_BINDING_PATTERN, errorVariable.type); return false; } if 
(possibleTypes.size() > 1) { LinkedHashSet<BType> detailType = new LinkedHashSet<>(); for (BErrorType possibleErrType : possibleTypes) { detailType.add(possibleErrType.detailType); } BType errorDetailType = detailType.size() > 1 ? BUnionType.create(null, detailType) : detailType.iterator().next(); errorType = new BErrorType(null, symTable.stringType, errorDetailType); } else { errorType = possibleTypes.get(0); } break; case TypeTags.ERROR: errorType = (BErrorType) errorVariable.type; break; default: dlog.error(errorVariable.pos, DiagnosticCode.INVALID_ERROR_BINDING_PATTERN, errorVariable.type); return false; } errorVariable.type = errorType; boolean isReasonIgnored = false; BLangSimpleVariable reasonVariable = errorVariable.reason; if (Names.IGNORE == names.fromIdNode(reasonVariable.name)) { reasonVariable.type = symTable.noType; isReasonIgnored = true; } else { errorVariable.reason.type = errorType.reasonType; errorVariable.reason.accept(this); } if (errorVariable.detail == null || (errorVariable.detail.isEmpty() && !isRestDetailBindingAvailable(errorVariable))) { if (isReasonIgnored) { dlog.error(errorVariable.pos, DiagnosticCode.NO_NEW_VARIABLES_VAR_ASSIGNMENT); return false; } return true; } if (errorType.detailType.getKind() == TypeKind.RECORD) { BRecordType recordType = (BRecordType) errorType.detailType; Map<String, BField> fieldMap = recordType.fields.stream() .collect(Collectors.toMap(f -> f.name.value, f -> f)); for (BLangErrorVariable.BLangErrorDetailEntry errorDetailEntry : errorVariable.detail) { String entryName = errorDetailEntry.key.getValue(); BField entryField = fieldMap.get(entryName); BLangVariable boundVar = errorDetailEntry.valueBindingPattern; if (entryField != null) { if ((entryField.symbol.flags & Flags.OPTIONAL) == Flags.OPTIONAL) { boundVar.type = BUnionType.create(null, entryField.type, symTable.nilType); } else { boundVar.type = entryField.type; } } else { if (recordType.sealed) { dlog.error(errorVariable.pos, DiagnosticCode.INVALID_ERROR_BINDING_PATTERN, errorVariable.type); boundVar.type = symTable.semanticError; return false; } else { boundVar.type = BUnionType.create(null, recordType.restFieldType, symTable.nilType); } } boolean isIgnoredVar = boundVar.getKind() == NodeKind.VARIABLE && ((BLangSimpleVariable) boundVar).name.value.equals(Names.IGNORE.value); if (!isIgnoredVar) { boundVar.accept(this); } } if (isRestDetailBindingAvailable(errorVariable)) { BTypeSymbol typeSymbol = createTypeSymbol(SymTag.TYPE); BMapType restType = new BMapType(TypeTags.MAP, recordType.restFieldType, typeSymbol); typeSymbol.type = restType; errorVariable.restDetail.type = restType; errorVariable.restDetail.accept(this); } return true; } else if (errorType.detailType.getKind() == TypeKind.UNION) { BErrorTypeSymbol errorTypeSymbol = new BErrorTypeSymbol(SymTag.ERROR, Flags.PUBLIC, Names.ERROR, env.enclPkg.packageID, symTable.errorType, env.scope.owner); errorVariable.type = new BErrorType(errorTypeSymbol, symTable.stringType, symTable.detailType); return validateErrorVariable(errorVariable); } if (isRestDetailBindingAvailable(errorVariable)) { errorVariable.restDetail.type = symTable.detailType; errorVariable.restDetail.accept(this); } return true; } private boolean isRestDetailBindingAvailable(BLangErrorVariable errorVariable) { return errorVariable.restDetail != null && !errorVariable.restDetail.name.value.equals(Names.IGNORE.value); } private BTypeSymbol createTypeSymbol(int type) { return new BTypeSymbol(type, Flags.PUBLIC, Names.EMPTY, env.enclPkg.packageID, null, 
env.scope.owner); } /** * This method will resolve field types based on a list of possible types. * When a record variable has multiple possible assignable types, each field will be a union of the relevant * possible types field type. * * @param recordVar record variable whose fields types are to be resolved * @param possibleTypes list of possible types * @param recordSymbol symbol of the record type to be used in creating fields * @return the list of fields */ private List<BField> populateAndGetPossibleFieldsForRecVar(BLangRecordVariable recordVar, List<BType> possibleTypes, BRecordTypeSymbol recordSymbol) { List<BField> fields = new ArrayList<>(); for (BLangRecordVariableKeyValue bLangRecordVariableKeyValue : recordVar.variableList) { String fieldName = bLangRecordVariableKeyValue.key.value; LinkedHashSet<BType> memberTypes = new LinkedHashSet<>(); for (BType possibleType : possibleTypes) { if (possibleType.tag == TypeTags.RECORD) { BRecordType possibleRecordType = (BRecordType) possibleType; Optional<BField> optionalField = possibleRecordType.fields.stream() .filter(field -> field.getName().getValue().equals(fieldName)) .findFirst(); if (optionalField.isPresent()) { BField bField = optionalField.get(); if (Symbols.isOptional(bField.symbol)) { memberTypes.add(symTable.nilType); } memberTypes.add(bField.type); } else { memberTypes.add(possibleRecordType.restFieldType); memberTypes.add(symTable.nilType); } continue; } if (possibleType.tag == TypeTags.MAP) { BMapType possibleMapType = (BMapType) possibleType; memberTypes.add(possibleMapType.constraint); continue; } memberTypes.add(possibleType); } BType fieldType = memberTypes.size() > 1 ? BUnionType.create(null, memberTypes) : memberTypes.iterator().next(); fields.add(new BField(names.fromString(fieldName), recordVar.pos, new BVarSymbol(0, names.fromString(fieldName), env.enclPkg.symbol.pkgID, fieldType, recordSymbol))); } return fields; }
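The switch block above resolves a union-typed tuple binding pattern by taking, for each member index, the union of that member across every candidate tuple type: for [int, string]|[boolean, float] bound to [a, b], 'a' resolves to int|boolean and 'b' to string|float. A self-contained Java sketch of just that per-index union step follows, using a hypothetical String-based type model rather than the compiler's BType hierarchy:

import java.util.ArrayList;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;

// Sketch of the per-member union resolution performed for union-typed tuple
// binding patterns. Each candidate is a tuple type whose members are plain
// type names; the compiler's real implementation works on BType/BTupleType.
public class TupleMemberResolutionSketch {
    static List<Set<String>> resolveMemberTypes(List<List<String>> candidateTuples, int memberCount) {
        List<Set<String>> resolved = new ArrayList<>();
        for (int i = 0; i < memberCount; i++) {
            // LinkedHashSet mirrors the compiler's choice: deduplicate union
            // members while keeping a deterministic order.
            Set<String> memberUnion = new LinkedHashSet<>();
            for (List<String> tuple : candidateTuples) {
                memberUnion.add(tuple.get(i));
            }
            resolved.add(memberUnion);
        }
        return resolved;
    }

    public static void main(String[] args) {
        List<List<String>> candidates = List.of(
                List.of("int", "string"),
                List.of("boolean", "float"));
        // Prints [[int, boolean], [string, float]]: 'a' is int|boolean and
        // 'b' is string|float, matching the explanation in the switch block.
        System.out.println(resolveMemberTypes(candidates, 2));
    }
}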
Maybe you could use `Utils.toSqlString` here; just rename this function to a proper name.
public String toString() { return "CostStateEvent{" + "groupExpression=" + getGroupExpression() + ", cost=" + cost + ", physicalProperties=" + physicalProperties + '}'; }
+ '}';
public String toString() { return Utils.toSqlString("CostStateEvent", "groupExpression", getGroupExpression(), "cost", cost, "physicalProperties", physicalProperties); }
class CostStateUpdateEvent extends StateEvent { private final double cost; private final PhysicalProperties physicalProperties; private CostStateUpdateEvent(GroupExpression groupExpression, double cost, PhysicalProperties physicalProperties) { super(groupExpression); this.cost = cost; this.physicalProperties = physicalProperties; } public static CostStateUpdateEvent of(GroupExpression groupExpression, double cost, PhysicalProperties physicalProperties) { return checkConnectContext() ? new CostStateUpdateEvent(groupExpression, cost, physicalProperties) : null; } @Override }
class CostStateUpdateEvent extends StateEvent { private final double cost; private final PhysicalProperties physicalProperties; private CostStateUpdateEvent(GroupExpression groupExpression, double cost, PhysicalProperties physicalProperties) { super(groupExpression); this.cost = cost; this.physicalProperties = physicalProperties; } public static CostStateUpdateEvent of(GroupExpression groupExpression, double cost, PhysicalProperties physicalProperties) { return checkConnectContext(CostStateUpdateEvent.class) ? new CostStateUpdateEvent(groupExpression, cost, physicalProperties) : null; } @Override }
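The rewritten toString() above delegates to a title-plus-alternating-key/value helper. The actual Utils.toSqlString implementation is not included in this record, so the sketch below only infers a plausible shape from the call site; the class name Utils and the exact output format are assumptions:

import java.util.StringJoiner;

// Hypothetical sketch of a Utils.toSqlString-style helper, inferred from the
// call Utils.toSqlString("CostStateEvent", "groupExpression", ge, "cost",
// cost, ...): a title followed by alternating key/value varargs.
public final class Utils {
    private Utils() {
    }

    public static String toSqlString(String title, Object... kvs) {
        StringJoiner joiner = new StringJoiner(", ", title + " ( ", " )");
        for (int i = 0; i + 1 < kvs.length; i += 2) {
            joiner.add(kvs[i] + "=" + kvs[i + 1]);
        }
        return joiner.toString();
    }

    public static void main(String[] args) {
        // Prints: CostStateEvent ( cost=1.5, physicalProperties=ANY )
        System.out.println(toSqlString("CostStateEvent", "cost", 1.5, "physicalProperties", "ANY"));
    }
}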
Why add elements into the same `mysqlTypeCodes` list in `PrepareStmt`? Is there any problem that could make the list grow larger than the number of placeholders?
private void handleExecute() { packetBuf = packetBuf.order(ByteOrder.LITTLE_ENDIAN); int stmtId = packetBuf.getInt(); packetBuf.get(); packetBuf.getInt(); PrepareStmtContext prepareCtx = ctx.getPreparedStmt(String.valueOf(stmtId)); if (null == prepareCtx) { ctx.getState().setError("msg: Not Found prepared statement, stmtName: " + stmtId); return; } int numParams = prepareCtx.getStmt().getParameters().size(); byte[] nullBitmap = new byte[(numParams + 7) / 8]; packetBuf.get(nullBitmap); try { ctx.setQueryId(UUIDUtil.genUUID()); Integer[] mysqlTypeCodes = new Integer[numParams]; if (packetBuf.hasRemaining() && (int) packetBuf.get() != 0) { IntStream.range(0, numParams) .forEach(i -> prepareCtx.getStmt().addMysqlTypeCodes((int) packetBuf.getChar())); } List<Expr> exprs = new ArrayList<>(); for (int i = 0; i < numParams; ++i) { if (isNull(nullBitmap, i)) { exprs.add(new NullLiteral()); continue; } LiteralExpr l = LiteralExpr.parseLiteral(prepareCtx.getStmt().getMysqlTypeCodes().get(i)); l.parseMysqlParam(packetBuf); exprs.add(l); } ExecuteStmt executeStmt = new ExecuteStmt(String.valueOf(stmtId), exprs); boolean enableAudit = ctx.getSessionVariable().isAuditExecuteStmt(); String originStmt = enableAudit ? executeStmt.toSql() : "/* omit */"; executeStmt.setOrigStmt(new OriginStatement(originStmt, 0)); executor = new StmtExecutor(ctx, executeStmt); ctx.setExecutor(executor); executor.execute(); if (enableAudit) { auditAfterExec(originStmt, executor.getParsedStmt(), executor.getQueryStatisticsForAuditLog()); } } catch (Throwable e) { LOG.warn("Process one query failed because unknown reason: ", e); ctx.getState().setError(e.getClass().getSimpleName() + ", msg: " + e.getMessage()); } }
.forEach(i -> prepareCtx.getStmt().addMysqlTypeCodes((int) packetBuf.getChar()));
private void handleExecute() { packetBuf = packetBuf.order(ByteOrder.LITTLE_ENDIAN); int stmtId = packetBuf.getInt(); packetBuf.get(); packetBuf.getInt(); PrepareStmtContext prepareCtx = ctx.getPreparedStmt(String.valueOf(stmtId)); if (null == prepareCtx) { ctx.getState().setError("msg: Not Found prepared statement, stmtName: " + stmtId); return; } int numParams = prepareCtx.getStmt().getParameters().size(); byte[] nullBitmap = new byte[(numParams + 7) / 8]; packetBuf.get(nullBitmap); try { ctx.setQueryId(UUIDUtil.genUUID()); if (packetBuf.hasRemaining() && (int) packetBuf.get() != 0) { for (int i = 0; i < numParams; ++i) { prepareCtx.getStmt().getMysqlTypeCodes().set(i, (int) packetBuf.getChar()); } } List<Expr> exprs = new ArrayList<>(); for (int i = 0; i < numParams; ++i) { if (isNull(nullBitmap, i)) { exprs.add(new NullLiteral()); continue; } LiteralExpr l = LiteralExpr.parseLiteral(prepareCtx.getStmt().getMysqlTypeCodes().get(i)); l.parseMysqlParam(packetBuf); exprs.add(l); } ExecuteStmt executeStmt = new ExecuteStmt(String.valueOf(stmtId), exprs); boolean enableAudit = ctx.getSessionVariable().isAuditExecuteStmt(); String originStmt = enableAudit ? executeStmt.toSql() : "/* omit */"; executeStmt.setOrigStmt(new OriginStatement(originStmt, 0)); executor = new StmtExecutor(ctx, executeStmt); ctx.setExecutor(executor); executor.execute(); if (enableAudit) { auditAfterExec(originStmt, executor.getParsedStmt(), executor.getQueryStatisticsForAuditLog()); } } catch (Throwable e) { LOG.warn("Process one query failed because unknown reason: ", e); ctx.getState().setError(e.getClass().getSimpleName() + ", msg: " + e.getMessage()); } }
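The fix above switches from add() to set(i, ...) because the PrepareStmt is cached per connection and handleExecute() runs once per COM_STMT_EXECUTE, so appending would keep growing the shared list past the placeholder count. A minimal sketch of that failure mode, using a hypothetical CachedPrepareStmt stand-in rather than the real PrepareStmt:

import java.util.ArrayList;
import java.util.List;

// Demonstrates why appending type codes on every execution of a cached
// prepared statement is wrong: the list outgrows the placeholder count, and
// getMysqlTypeCodes().get(i) then reads stale entries from earlier runs.
public class MysqlTypeCodesSketch {
    static class CachedPrepareStmt {
        // Two placeholders, pre-sized like a freshly prepared statement.
        final List<Integer> mysqlTypeCodes = new ArrayList<>(List.of(0, 0));
    }

    public static void main(String[] args) {
        int numParams = 2;

        CachedPrepareStmt buggy = new CachedPrepareStmt();
        for (int execution = 0; execution < 3; execution++) {
            for (int i = 0; i < numParams; i++) {
                buggy.mysqlTypeCodes.add(0xFD); // appends on every execute
            }
        }
        System.out.println("after add(): " + buggy.mysqlTypeCodes.size()); // 8, not 2

        CachedPrepareStmt fixed = new CachedPrepareStmt();
        for (int execution = 0; execution < 3; execution++) {
            for (int i = 0; i < numParams; i++) {
                fixed.mysqlTypeCodes.set(i, 0xFD); // overwrites in place
            }
        }
        System.out.println("after set(): " + fixed.mysqlTypeCodes.size()); // stays 2
    }
}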
class ConnectProcessor { private static final Logger LOG = LogManager.getLogger(ConnectProcessor.class); protected final ConnectContext ctx; private ByteBuffer packetBuf; protected StmtExecutor executor = null; public ConnectProcessor(ConnectContext context) { this.ctx = context; } private void handleInitDb() { String identifier = new String(packetBuf.array(), 1, packetBuf.limit() - 1); try { String[] parts = identifier.trim().split("\\s+"); if (parts.length == 2) { if (parts[0].equalsIgnoreCase("catalog")) { ctx.changeCatalog(parts[1]); } else if (parts[0].equalsIgnoreCase("warehouse")) { WarehouseManager warehouseMgr = GlobalStateMgr.getCurrentState().getWarehouseMgr(); String newWarehouseName = parts[1]; if (!warehouseMgr.warehouseExists(newWarehouseName)) { ErrorReport.reportAnalysisException(ErrorCode.ERR_BAD_WAREHOUSE_ERROR, newWarehouseName); } ctx.setCurrentWarehouse(newWarehouseName); } else { ctx.getState().setError("not supported command"); } } else { ctx.changeCatalogDb(identifier); } } catch (Exception e) { ctx.getState().setError(e.getMessage()); return; } ctx.getState().setOk(); } private void handleQuit() { ctx.setKilled(); ctx.getState().setOk(); } private void handleChangeUser() throws IOException { if (!MysqlProto.changeUser(ctx, packetBuf)) { LOG.warn("Failed to execute command `Change user`."); return; } handleResetConnection(); } private void handleResetConnection() throws IOException { resetConnectionSession(); ctx.getState().setOk(); } private void handlePing() { ctx.getState().setOk(); } private void resetConnectionSession() { ctx.getSerializer().reset(); ctx.getSerializer().setCapability(ctx.getCapability()); ctx.resetSessionVariable(); } public void auditAfterExec(String origStmt, StatementBase parsedStmt, PQueryStatistics statistics) { long endTime = System.currentTimeMillis(); long elapseMs = endTime - ctx.getStartTime(); boolean isForwardToLeader = (executor != null) ? executor.getIsForwardToLeaderOrInit(false) : false; if (ctx.getState().getErrType() == QueryState.ErrType.IGNORE_ERR) { return; } String errorCode = ""; if (ctx.getState().getErrType() != QueryState.ErrType.UNKNOWN) { errorCode = ctx.getState().getErrType().name(); } else if (StringUtils.isNotEmpty(ctx.getErrorCode())) { errorCode = ctx.getErrorCode(); } ctx.getAuditEventBuilder().setEventType(EventType.AFTER_QUERY) .setState(ctx.getState().toString()) .setErrorCode(errorCode) .setQueryTime(elapseMs) .setReturnRows(ctx.getReturnRows()) .setStmtId(ctx.getStmtId()) .setIsForwardToLeader(isForwardToLeader) .setQueryId(ctx.getQueryId() == null ? "NaN" : ctx.getQueryId().toString()); if (statistics != null) { ctx.getAuditEventBuilder().setScanBytes(statistics.scanBytes); ctx.getAuditEventBuilder().setScanRows(statistics.scanRows); ctx.getAuditEventBuilder().setCpuCostNs(statistics.cpuCostNs == null ? -1 : statistics.cpuCostNs); ctx.getAuditEventBuilder().setMemCostBytes(statistics.memCostBytes == null ? -1 : statistics.memCostBytes); ctx.getAuditEventBuilder().setSpilledBytes(statistics.spillBytes == null ? -1 : statistics.spillBytes); ctx.getAuditEventBuilder().setReturnRows(statistics.returnedRows == null ? 
0 : statistics.returnedRows); } if (ctx.getState().isQuery()) { MetricRepo.COUNTER_QUERY_ALL.increase(1L); ResourceGroupMetricMgr.increaseQuery(ctx, 1L); if (ctx.getState().getStateType() == QueryState.MysqlStateType.ERR) { MetricRepo.COUNTER_QUERY_ERR.increase(1L); ResourceGroupMetricMgr.increaseQueryErr(ctx, 1L); } else { MetricRepo.COUNTER_QUERY_SUCCESS.increase(1L); MetricRepo.HISTO_QUERY_LATENCY.update(elapseMs); ResourceGroupMetricMgr.updateQueryLatency(ctx, elapseMs); if (elapseMs > Config.qe_slow_log_ms || ctx.getSessionVariable().isEnableSQLDigest()) { MetricRepo.COUNTER_SLOW_QUERY.increase(1L); ctx.getAuditEventBuilder().setDigest(computeStatementDigest(parsedStmt)); } } ctx.getAuditEventBuilder().setIsQuery(true); if (ctx.getSessionVariable().isEnableBigQueryLog()) { ctx.getAuditEventBuilder().setBigQueryLogCPUSecondThreshold( ctx.getSessionVariable().getBigQueryLogCPUSecondThreshold()); ctx.getAuditEventBuilder().setBigQueryLogScanBytesThreshold( ctx.getSessionVariable().getBigQueryLogScanBytesThreshold()); ctx.getAuditEventBuilder().setBigQueryLogScanRowsThreshold( ctx.getSessionVariable().getBigQueryLogScanRowsThreshold()); } } else { ctx.getAuditEventBuilder().setIsQuery(false); } ctx.getAuditEventBuilder().setFeIp(FrontendOptions.getLocalHostAddress()); if (!ctx.getState().isQuery() && (parsedStmt != null && parsedStmt.needAuditEncryption())) { ctx.getAuditEventBuilder().setStmt(AstToSQLBuilder.toSQL(parsedStmt)); } else if (parsedStmt == null) { ctx.getAuditEventBuilder().setStmt(origStmt); } else { ctx.getAuditEventBuilder().setStmt(LogUtil.removeLineSeparator(origStmt)); } GlobalStateMgr.getCurrentState().getAuditEventProcessor().handleAuditEvent(ctx.getAuditEventBuilder().build()); } public String computeStatementDigest(StatementBase queryStmt) { if (queryStmt == null) { return ""; } String digest = SqlDigestBuilder.build(queryStmt); try { MessageDigest md = MessageDigest.getInstance("MD5"); md.reset(); md.update(digest.getBytes()); return Hex.encodeHexString(md.digest()); } catch (NoSuchAlgorithmException e) { return ""; } } private boolean containsComment(String sql) { return (sql.contains("--")) || sql.contains(" } protected void addFinishedQueryDetail() { if (!Config.enable_collect_query_detail_info) { return; } QueryDetail queryDetail = ctx.getQueryDetail(); if (queryDetail == null || !queryDetail.getQueryId().equals(DebugUtil.printId(ctx.getQueryId()))) { return; } long endTime = System.currentTimeMillis(); long elapseMs = endTime - ctx.getStartTime(); if (ctx.getState().getStateType() == QueryState.MysqlStateType.ERR) { queryDetail.setState(QueryDetail.QueryMemState.FAILED); queryDetail.setErrorMessage(ctx.getState().getErrorMessage()); } else { queryDetail.setState(QueryDetail.QueryMemState.FINISHED); } queryDetail.setEndTime(endTime); queryDetail.setLatency(elapseMs); queryDetail.setResourceGroupName(ctx.getResourceGroup() != null ? ctx.getResourceGroup().getName() : ""); queryDetail.setReturnRows(ctx.getReturnRows()); queryDetail.setDigest(ctx.getAuditEventBuilder().build().digest); PQueryStatistics statistics = executor.getQueryStatisticsForAuditLog(); if (statistics != null) { queryDetail.setScanBytes(statistics.scanBytes); queryDetail.setScanRows(statistics.scanRows); queryDetail.setCpuCostNs(statistics.cpuCostNs == null ? -1 : statistics.cpuCostNs); queryDetail.setMemCostBytes(statistics.memCostBytes == null ? -1 : statistics.memCostBytes); queryDetail.setSpillBytes(statistics.spillBytes == null ? 
-1 : statistics.spillBytes); } QueryDetailQueue.addQueryDetail(queryDetail); } protected void addRunningQueryDetail(StatementBase parsedStmt) { if (!Config.enable_collect_query_detail_info) { return; } String sql; if (!ctx.getState().isQuery() && parsedStmt.needAuditEncryption()) { sql = AstToSQLBuilder.toSQL(parsedStmt); } else { sql = parsedStmt.getOrigStmt().originStmt; } boolean isQuery = parsedStmt instanceof QueryStatement; QueryDetail queryDetail = new QueryDetail( DebugUtil.printId(ctx.getQueryId()), isQuery, ctx.connectionId, ctx.getMysqlChannel() != null ? ctx.getMysqlChannel().getRemoteIp() : "System", ctx.getStartTime(), -1, -1, QueryDetail.QueryMemState.RUNNING, ctx.getDatabase(), sql, ctx.getQualifiedUser(), Optional.ofNullable(ctx.getResourceGroup()).map(TWorkGroup::getName).orElse("")); ctx.setQueryDetail(queryDetail); QueryDetailQueue.addQueryDetail(queryDetail.copy()); } protected void handleQuery() { MetricRepo.COUNTER_REQUEST_ALL.increase(1L); String originStmt = null; byte[] bytes = packetBuf.array(); int ending = packetBuf.limit() - 1; while (ending >= 1 && bytes[ending] == '\0') { ending--; } originStmt = new String(bytes, 1, ending, StandardCharsets.UTF_8); ctx.getAuditEventBuilder().reset(); ctx.getAuditEventBuilder() .setTimestamp(System.currentTimeMillis()) .setClientIp(ctx.getMysqlChannel().getRemoteHostPortString()) .setUser(ctx.getQualifiedUser()) .setAuthorizedUser( ctx.getCurrentUserIdentity() == null ? "null" : ctx.getCurrentUserIdentity().toString()) .setDb(ctx.getDatabase()) .setCatalog(ctx.getCurrentCatalog()); Tracers.register(ctx); StatementBase parsedStmt = null; try { ctx.setQueryId(UUIDUtil.genUUID()); List<StatementBase> stmts; try { stmts = com.starrocks.sql.parser.SqlParser.parse(originStmt, ctx.getSessionVariable()); } catch (ParsingException parsingException) { throw new AnalysisException(parsingException.getMessage()); } for (int i = 0; i < stmts.size(); ++i) { ctx.getState().reset(); if (i > 0) { ctx.resetReturnRows(); ctx.setQueryId(UUIDUtil.genUUID()); } parsedStmt = stmts.get(i); if (ctx.getCommand() == MysqlCommand.COM_STMT_PREPARE && !(parsedStmt instanceof PrepareStmt)) { parsedStmt = new PrepareStmt("", parsedStmt, new ArrayList<>()); } if (ctx.getCommand() == MysqlCommand.COM_STMT_PREPARE && (parsedStmt instanceof PrepareStmt)) { ((PrepareStmt) parsedStmt).setName(String.valueOf(ctx.getStmtId())); if (!(((PrepareStmt) parsedStmt).getInnerStmt() instanceof QueryStatement)) { throw new AnalysisException("prepare statement only support QueryStatement"); } } parsedStmt.setOrigStmt(new OriginStatement(originStmt, i)); Tracers.init(ctx, parsedStmt.getTraceMode(), parsedStmt.getTraceModule()); if (i == stmts.size() - 1) { addRunningQueryDetail(parsedStmt); } executor = new StmtExecutor(ctx, parsedStmt); ctx.setExecutor(executor); ctx.setIsLastStmt(i == stmts.size() - 1); executor.execute(); if (ctx.getState().getStateType() == QueryState.MysqlStateType.ERR) { break; } if (i != stmts.size() - 1) { ctx.getState().serverStatus |= MysqlServerStatusFlag.SERVER_MORE_RESULTS_EXISTS; finalizeCommand(); } } } catch (AnalysisException e) { LOG.warn("Failed to parse SQL: " + originStmt + ", because.", e); ctx.getState().setError(e.getMessage()); ctx.getState().setErrType(QueryState.ErrType.ANALYSIS_ERR); } catch (Throwable e) { LOG.warn("Process one query failed. 
SQL: " + originStmt + ", because unknown reason: ", e); ctx.getState().setError("Unexpected exception: " + e.getMessage()); ctx.getState().setErrType(QueryState.ErrType.INTERNAL_ERR); } finally { Tracers.close(); } if (executor != null) { auditAfterExec(originStmt, executor.getParsedStmt(), executor.getQueryStatisticsForAuditLog()); } else { auditAfterExec(originStmt, null, null); } addFinishedQueryDetail(); } private void handleFieldList() throws IOException { String tableName = new String(MysqlProto.readNulTerminateString(packetBuf), StandardCharsets.UTF_8); if (Strings.isNullOrEmpty(tableName)) { ctx.getState().setError("Empty tableName"); return; } Database db = ctx.getGlobalStateMgr().getMetadataMgr().getDb(ctx.getCurrentCatalog(), ctx.getDatabase()); if (db == null) { ctx.getState().setError("Unknown database(" + ctx.getDatabase() + ")"); return; } Locker locker = new Locker(); locker.lockDatabase(db, LockType.READ); try { Table table = ctx.getGlobalStateMgr().getMetadataMgr().getTable( ctx.getCurrentCatalog(), ctx.getDatabase(), tableName); if (table == null) { ctx.getState().setError("Unknown table(" + tableName + ")"); return; } MysqlSerializer serializer = ctx.getSerializer(); MysqlChannel channel = ctx.getMysqlChannel(); List<Column> baseSchema = table.getBaseSchema(); for (Column column : baseSchema) { serializer.reset(); serializer.writeField(db.getOriginName(), table.getName(), column, true); channel.sendOnePacket(serializer.toByteBuffer()); } } catch (StarRocksConnectorException e) { LOG.error("errors happened when getting table {}", tableName, e); } finally { locker.unLockDatabase(db, LockType.READ); } ctx.getState().setEof(); } private void handleStmtReset() { ctx.getState().setOk(); } private void handleStmtClose() { int stmtId = packetBuf.getInt(); ctx.removePreparedStmt(String.valueOf(stmtId)); ctx.getState().setStateType(QueryState.MysqlStateType.NOOP); } private static boolean isNull(byte[] bitmap, int position) { return (bitmap[position / 8] & (0xff & (1 << (position & 7)))) != 0; } private void dispatch() throws IOException { int code = packetBuf.get(); MysqlCommand command = MysqlCommand.fromCode(code); if (command == null) { ErrorReport.report(ErrorCode.ERR_UNKNOWN_COM_ERROR); ctx.getState().setError("Unknown command(" + command + ")"); LOG.debug("Unknown MySQL protocol command"); return; } ctx.setCommand(command); ctx.setStartTime(); ctx.setResourceGroup(null); ctx.setErrorCode(""); switch (command) { case COM_INIT_DB: handleInitDb(); break; case COM_QUIT: handleQuit(); break; case COM_QUERY: case COM_STMT_PREPARE: handleQuery(); ctx.setStartTime(); break; case COM_STMT_RESET: handleStmtReset(); break; case COM_STMT_CLOSE: handleStmtClose(); break; case COM_FIELD_LIST: handleFieldList(); break; case COM_CHANGE_USER: handleChangeUser(); break; case COM_RESET_CONNECTION: handleResetConnection(); break; case COM_PING: handlePing(); break; case COM_STMT_EXECUTE: handleExecute(); break; default: ctx.getState().setError("Unsupported command(" + command + ")"); LOG.debug("Unsupported command: {}", command); break; } } private ByteBuffer getResultPacket() { MysqlPacket packet = ctx.getState().toResponsePacket(); if (packet == null) { return null; } MysqlSerializer serializer = ctx.getSerializer(); serializer.reset(); packet.writeTo(serializer); return serializer.toByteBuffer(); } private void finalizeCommand() throws IOException { ByteBuffer packet = null; if (executor != null && executor.isForwardToLeader()) { if (ctx.getState().getStateType() == 
QueryState.MysqlStateType.ERR) { packet = executor.getOutputPacket(); if (packet == null) { packet = getResultPacket(); if (packet == null) { LOG.debug("packet == null"); return; } } } else { ShowResultSet resultSet = executor.getShowResultSet(); if (resultSet == null) { if (executor.sendResultToChannel(ctx.getMysqlChannel())) { packet = getResultPacket(); } else { packet = executor.getOutputPacket(); } } else { executor.sendShowResult(resultSet); packet = getResultPacket(); if (packet == null) { LOG.debug("packet == null"); return; } } } } else { packet = getResultPacket(); if (packet == null) { LOG.debug("packet == null"); return; } } MysqlChannel channel = ctx.getMysqlChannel(); channel.sendAndFlush(packet); if (ctx.getCommand() == MysqlCommand.COM_QUERY) { ctx.setLastQueryId(ctx.queryId); ctx.setQueryId(null); } } public TMasterOpResult proxyExecute(TMasterOpRequest request) { ctx.setCurrentCatalog(request.catalog); if (ctx.getCurrentCatalog() == null) { TMasterOpResult result = new TMasterOpResult(); ctx.getState().setError( "Missing current catalog. You need to upgrade this Frontend to the same version as Leader Frontend."); result.setMaxJournalId(GlobalStateMgr.getCurrentState().getMaxJournalId()); result.setPacket(getResultPacket()); return result; } ctx.setDatabase(request.db); ctx.setQualifiedUser(request.user); ctx.setGlobalStateMgr(GlobalStateMgr.getCurrentState()); ctx.getState().reset(); if (request.isSetResourceInfo()) { ctx.getSessionVariable().setResourceGroup(request.getResourceInfo().getGroup()); } if (request.isSetUser_ip()) { ctx.setRemoteIP(request.getUser_ip()); } if (request.isSetTime_zone()) { ctx.getSessionVariable().setTimeZone(request.getTime_zone()); } if (request.isSetStmt_id()) { ctx.setForwardedStmtId(request.getStmt_id()); } if (request.isSetSqlMode()) { ctx.getSessionVariable().setSqlMode(request.sqlMode); } if (request.isSetEnableStrictMode()) { ctx.getSessionVariable().setEnableInsertStrict(request.enableStrictMode); } if (request.isSetCurrent_user_ident()) { UserIdentity currentUserIdentity = UserIdentity.fromThrift(request.getCurrent_user_ident()); ctx.setCurrentUserIdentity(currentUserIdentity); } if (request.isSetUser_roles()) { List<Long> roleIds = request.getUser_roles().getRole_id_list(); ctx.setCurrentRoleIds(new HashSet<>(roleIds)); } else { ctx.setCurrentRoleIds(new HashSet<>()); } if (request.isSetIsLastStmt()) { ctx.setIsLastStmt(request.isIsLastStmt()); } else { ctx.setIsLastStmt(true); } if (request.isSetQuery_options()) { TQueryOptions queryOptions = request.getQuery_options(); if (queryOptions.isSetMem_limit()) { ctx.getSessionVariable().setMaxExecMemByte(queryOptions.getMem_limit()); } if (queryOptions.isSetQuery_timeout()) { ctx.getSessionVariable().setQueryTimeoutS(queryOptions.getQuery_timeout()); } if (queryOptions.isSetLoad_mem_limit()) { ctx.getSessionVariable().setLoadMemLimit(queryOptions.getLoad_mem_limit()); } if (queryOptions.isSetMax_scan_key_num()) { ctx.getSessionVariable().setMaxScanKeyNum(queryOptions.getMax_scan_key_num()); } if (queryOptions.isSetMax_pushdown_conditions_per_column()) { ctx.getSessionVariable().setMaxPushdownConditionsPerColumn( queryOptions.getMax_pushdown_conditions_per_column()); } } else { if (request.isSetExecMemLimit()) { ctx.getSessionVariable().setMaxExecMemByte(request.getExecMemLimit()); } if (request.isSetQueryTimeout()) { ctx.getSessionVariable().setQueryTimeoutS(request.getQueryTimeout()); } if (request.isSetLoadMemLimit()) { ctx.getSessionVariable().setLoadMemLimit(request.loadMemLimit); } 
} if (request.isSetQueryId()) { ctx.setQueryId(UUIDUtil.fromTUniqueid(request.getQueryId())); } if (request.isSetForward_times()) { ctx.setForwardTimes(request.getForward_times()); } ctx.setThreadLocalInfo(); if (ctx.getCurrentUserIdentity() == null) { TMasterOpResult result = new TMasterOpResult(); ctx.getState().setError( "Missing current user identity. You need to upgrade this Frontend to the same version as Leader Frontend."); result.setMaxJournalId(GlobalStateMgr.getCurrentState().getMaxJournalId()); result.setPacket(getResultPacket()); return result; } StmtExecutor executor = null; try { if (request.isSetModified_variables_sql()) { LOG.info("Set session variables first: {}", request.modified_variables_sql); new StmtExecutor(ctx, new OriginStatement(request.modified_variables_sql, 0), true).execute(); } int idx = request.isSetStmtIdx() ? request.getStmtIdx() : 0; executor = new StmtExecutor(ctx, new OriginStatement(request.getSql(), idx), true); executor.execute(); } catch (IOException e) { LOG.warn("Process one query failed because IOException: ", e); ctx.getState().setError("StarRocks process failed: " + e.getMessage()); } catch (Throwable e) { LOG.warn("Process one query failed because unknown reason: ", e); ctx.getState().setError("Unexpected exception: " + e.getMessage()); } if (executor != null && executor.getIsForwardToLeaderOrInit(false)) { return executor.getLeaderOpExecutor().getResult(); } TMasterOpResult result = new TMasterOpResult(); result.setMaxJournalId(GlobalStateMgr.getCurrentState().getMaxJournalId()); if (!ctx.getIsLastStmt() && ctx.getState().getStateType() != QueryState.MysqlStateType.ERR) { ctx.getState().serverStatus |= MysqlServerStatusFlag.SERVER_MORE_RESULTS_EXISTS; } result.setPacket(getResultPacket()); result.setState(ctx.getState().getStateType().toString()); if (executor != null) { if (executor.getProxyResultSet() != null) { result.setResultSet(executor.getProxyResultSet().tothrift()); } else if (executor.getProxyResultBuffer() != null) { result.setChannelBufferList(executor.getProxyResultBuffer()); } String resourceGroupName = ctx.getAuditEventBuilder().build().resourceGroup; if (StringUtils.isNotEmpty(resourceGroupName)) { result.setResource_group_name(resourceGroupName); } PQueryStatistics audit = executor.getQueryStatisticsForAuditLog(); if (audit != null) { result.setAudit_statistics(AuditStatisticsUtil.toThrift(audit)); } } return result; } public void processOnce() throws IOException { ctx.getState().reset(); executor = null; final MysqlChannel channel = ctx.getMysqlChannel(); channel.setSequenceId(0); try { packetBuf = channel.fetchOnePacket(); if (packetBuf == null) { throw new RpcException(ctx.getRemoteIP(), "Error happened when receiving packet."); } } catch (AsynchronousCloseException e) { return; } dispatch(); finalizeCommand(); ctx.setCommand(MysqlCommand.COM_SLEEP); } public void loop() { while (!ctx.isKilled()) { try { processOnce(); } catch (RpcException rpce) { LOG.debug("Exception happened in one session(" + ctx + ").", rpce); ctx.setKilled(); break; } catch (Exception e) { LOG.warn("Exception happened in one seesion(" + ctx + ").", e); ctx.setKilled(); break; } } } }
class ConnectProcessor { private static final Logger LOG = LogManager.getLogger(ConnectProcessor.class); protected final ConnectContext ctx; private ByteBuffer packetBuf; protected StmtExecutor executor = null; public ConnectProcessor(ConnectContext context) { this.ctx = context; } private void handleInitDb() { String identifier = new String(packetBuf.array(), 1, packetBuf.limit() - 1); try { String[] parts = identifier.trim().split("\\s+"); if (parts.length == 2) { if (parts[0].equalsIgnoreCase("catalog")) { ctx.changeCatalog(parts[1]); } else if (parts[0].equalsIgnoreCase("warehouse")) { WarehouseManager warehouseMgr = GlobalStateMgr.getCurrentState().getWarehouseMgr(); String newWarehouseName = parts[1]; if (!warehouseMgr.warehouseExists(newWarehouseName)) { ErrorReport.reportAnalysisException(ErrorCode.ERR_BAD_WAREHOUSE_ERROR, newWarehouseName); } ctx.setCurrentWarehouse(newWarehouseName); } else { ctx.getState().setError("not supported command"); } } else { ctx.changeCatalogDb(identifier); } } catch (Exception e) { ctx.getState().setError(e.getMessage()); return; } ctx.getState().setOk(); } private void handleQuit() { ctx.setKilled(); ctx.getState().setOk(); } private void handleChangeUser() throws IOException { if (!MysqlProto.changeUser(ctx, packetBuf)) { LOG.warn("Failed to execute command `Change user`."); return; } handleResetConnection(); } private void handleResetConnection() throws IOException { resetConnectionSession(); ctx.getState().setOk(); } private void handlePing() { ctx.getState().setOk(); } private void resetConnectionSession() { ctx.getSerializer().reset(); ctx.getSerializer().setCapability(ctx.getCapability()); ctx.resetSessionVariable(); } public void auditAfterExec(String origStmt, StatementBase parsedStmt, PQueryStatistics statistics) { long endTime = System.currentTimeMillis(); long elapseMs = endTime - ctx.getStartTime(); boolean isForwardToLeader = (executor != null) ? executor.getIsForwardToLeaderOrInit(false) : false; if (ctx.getState().getErrType() == QueryState.ErrType.IGNORE_ERR) { return; } String errorCode = ""; if (ctx.getState().getErrType() != QueryState.ErrType.UNKNOWN) { errorCode = ctx.getState().getErrType().name(); } else if (StringUtils.isNotEmpty(ctx.getErrorCode())) { errorCode = ctx.getErrorCode(); } ctx.getAuditEventBuilder().setEventType(EventType.AFTER_QUERY) .setState(ctx.getState().toString()) .setErrorCode(errorCode) .setQueryTime(elapseMs) .setReturnRows(ctx.getReturnRows()) .setStmtId(ctx.getStmtId()) .setIsForwardToLeader(isForwardToLeader) .setQueryId(ctx.getQueryId() == null ? "NaN" : ctx.getQueryId().toString()); if (statistics != null) { ctx.getAuditEventBuilder().setScanBytes(statistics.scanBytes); ctx.getAuditEventBuilder().setScanRows(statistics.scanRows); ctx.getAuditEventBuilder().setCpuCostNs(statistics.cpuCostNs == null ? -1 : statistics.cpuCostNs); ctx.getAuditEventBuilder().setMemCostBytes(statistics.memCostBytes == null ? -1 : statistics.memCostBytes); ctx.getAuditEventBuilder().setSpilledBytes(statistics.spillBytes == null ? -1 : statistics.spillBytes); ctx.getAuditEventBuilder().setReturnRows(statistics.returnedRows == null ? 
0 : statistics.returnedRows); } if (ctx.getState().isQuery()) { MetricRepo.COUNTER_QUERY_ALL.increase(1L); ResourceGroupMetricMgr.increaseQuery(ctx, 1L); if (ctx.getState().getStateType() == QueryState.MysqlStateType.ERR) { MetricRepo.COUNTER_QUERY_ERR.increase(1L); ResourceGroupMetricMgr.increaseQueryErr(ctx, 1L); } else { MetricRepo.COUNTER_QUERY_SUCCESS.increase(1L); MetricRepo.HISTO_QUERY_LATENCY.update(elapseMs); ResourceGroupMetricMgr.updateQueryLatency(ctx, elapseMs); if (elapseMs > Config.qe_slow_log_ms || ctx.getSessionVariable().isEnableSQLDigest()) { MetricRepo.COUNTER_SLOW_QUERY.increase(1L); ctx.getAuditEventBuilder().setDigest(computeStatementDigest(parsedStmt)); } } ctx.getAuditEventBuilder().setIsQuery(true); if (ctx.getSessionVariable().isEnableBigQueryLog()) { ctx.getAuditEventBuilder().setBigQueryLogCPUSecondThreshold( ctx.getSessionVariable().getBigQueryLogCPUSecondThreshold()); ctx.getAuditEventBuilder().setBigQueryLogScanBytesThreshold( ctx.getSessionVariable().getBigQueryLogScanBytesThreshold()); ctx.getAuditEventBuilder().setBigQueryLogScanRowsThreshold( ctx.getSessionVariable().getBigQueryLogScanRowsThreshold()); } } else { ctx.getAuditEventBuilder().setIsQuery(false); } ctx.getAuditEventBuilder().setFeIp(FrontendOptions.getLocalHostAddress()); if (!ctx.getState().isQuery() && (parsedStmt != null && parsedStmt.needAuditEncryption())) { ctx.getAuditEventBuilder().setStmt(AstToSQLBuilder.toSQL(parsedStmt)); } else if (parsedStmt == null) { ctx.getAuditEventBuilder().setStmt(origStmt); } else { ctx.getAuditEventBuilder().setStmt(LogUtil.removeLineSeparator(origStmt)); } GlobalStateMgr.getCurrentState().getAuditEventProcessor().handleAuditEvent(ctx.getAuditEventBuilder().build()); } public String computeStatementDigest(StatementBase queryStmt) { if (queryStmt == null) { return ""; } String digest = SqlDigestBuilder.build(queryStmt); try { MessageDigest md = MessageDigest.getInstance("MD5"); md.reset(); md.update(digest.getBytes()); return Hex.encodeHexString(md.digest()); } catch (NoSuchAlgorithmException e) { return ""; } } private boolean containsComment(String sql) { return (sql.contains("--")) || sql.contains(" } protected void addFinishedQueryDetail() { if (!Config.enable_collect_query_detail_info) { return; } QueryDetail queryDetail = ctx.getQueryDetail(); if (queryDetail == null || !queryDetail.getQueryId().equals(DebugUtil.printId(ctx.getQueryId()))) { return; } long endTime = System.currentTimeMillis(); long elapseMs = endTime - ctx.getStartTime(); if (ctx.getState().getStateType() == QueryState.MysqlStateType.ERR) { queryDetail.setState(QueryDetail.QueryMemState.FAILED); queryDetail.setErrorMessage(ctx.getState().getErrorMessage()); } else { queryDetail.setState(QueryDetail.QueryMemState.FINISHED); } queryDetail.setEndTime(endTime); queryDetail.setLatency(elapseMs); queryDetail.setResourceGroupName(ctx.getResourceGroup() != null ? ctx.getResourceGroup().getName() : ""); queryDetail.setReturnRows(ctx.getReturnRows()); queryDetail.setDigest(ctx.getAuditEventBuilder().build().digest); PQueryStatistics statistics = executor.getQueryStatisticsForAuditLog(); if (statistics != null) { queryDetail.setScanBytes(statistics.scanBytes); queryDetail.setScanRows(statistics.scanRows); queryDetail.setCpuCostNs(statistics.cpuCostNs == null ? -1 : statistics.cpuCostNs); queryDetail.setMemCostBytes(statistics.memCostBytes == null ? -1 : statistics.memCostBytes); queryDetail.setSpillBytes(statistics.spillBytes == null ? 
-1 : statistics.spillBytes); } QueryDetailQueue.addQueryDetail(queryDetail); } protected void addRunningQueryDetail(StatementBase parsedStmt) { if (!Config.enable_collect_query_detail_info) { return; } String sql; if (!ctx.getState().isQuery() && parsedStmt.needAuditEncryption()) { sql = AstToSQLBuilder.toSQL(parsedStmt); } else { sql = parsedStmt.getOrigStmt().originStmt; } boolean isQuery = parsedStmt instanceof QueryStatement; QueryDetail queryDetail = new QueryDetail( DebugUtil.printId(ctx.getQueryId()), isQuery, ctx.connectionId, ctx.getMysqlChannel() != null ? ctx.getMysqlChannel().getRemoteIp() : "System", ctx.getStartTime(), -1, -1, QueryDetail.QueryMemState.RUNNING, ctx.getDatabase(), sql, ctx.getQualifiedUser(), Optional.ofNullable(ctx.getResourceGroup()).map(TWorkGroup::getName).orElse("")); ctx.setQueryDetail(queryDetail); QueryDetailQueue.addQueryDetail(queryDetail.copy()); } protected void handleQuery() { MetricRepo.COUNTER_REQUEST_ALL.increase(1L); String originStmt = null; byte[] bytes = packetBuf.array(); int ending = packetBuf.limit() - 1; while (ending >= 1 && bytes[ending] == '\0') { ending--; } originStmt = new String(bytes, 1, ending, StandardCharsets.UTF_8); ctx.getAuditEventBuilder().reset(); ctx.getAuditEventBuilder() .setTimestamp(System.currentTimeMillis()) .setClientIp(ctx.getMysqlChannel().getRemoteHostPortString()) .setUser(ctx.getQualifiedUser()) .setAuthorizedUser( ctx.getCurrentUserIdentity() == null ? "null" : ctx.getCurrentUserIdentity().toString()) .setDb(ctx.getDatabase()) .setCatalog(ctx.getCurrentCatalog()); Tracers.register(ctx); StatementBase parsedStmt = null; try { ctx.setQueryId(UUIDUtil.genUUID()); List<StatementBase> stmts; try { stmts = com.starrocks.sql.parser.SqlParser.parse(originStmt, ctx.getSessionVariable()); } catch (ParsingException parsingException) { throw new AnalysisException(parsingException.getMessage()); } for (int i = 0; i < stmts.size(); ++i) { ctx.getState().reset(); if (i > 0) { ctx.resetReturnRows(); ctx.setQueryId(UUIDUtil.genUUID()); } parsedStmt = stmts.get(i); if (ctx.getCommand() == MysqlCommand.COM_STMT_PREPARE && !(parsedStmt instanceof PrepareStmt)) { parsedStmt = new PrepareStmt("", parsedStmt, new ArrayList<>()); } if (ctx.getCommand() == MysqlCommand.COM_STMT_PREPARE && (parsedStmt instanceof PrepareStmt)) { ((PrepareStmt) parsedStmt).setName(String.valueOf(ctx.getStmtId())); if (!(((PrepareStmt) parsedStmt).getInnerStmt() instanceof QueryStatement)) { throw new AnalysisException("prepare statement only support QueryStatement"); } } parsedStmt.setOrigStmt(new OriginStatement(originStmt, i)); Tracers.init(ctx, parsedStmt.getTraceMode(), parsedStmt.getTraceModule()); if (i == stmts.size() - 1) { addRunningQueryDetail(parsedStmt); } executor = new StmtExecutor(ctx, parsedStmt); ctx.setExecutor(executor); ctx.setIsLastStmt(i == stmts.size() - 1); executor.execute(); if (ctx.getState().getStateType() == QueryState.MysqlStateType.ERR) { break; } if (i != stmts.size() - 1) { ctx.getState().serverStatus |= MysqlServerStatusFlag.SERVER_MORE_RESULTS_EXISTS; finalizeCommand(); } } } catch (AnalysisException e) { LOG.warn("Failed to parse SQL: " + originStmt + ", because.", e); ctx.getState().setError(e.getMessage()); ctx.getState().setErrType(QueryState.ErrType.ANALYSIS_ERR); } catch (Throwable e) { LOG.warn("Process one query failed. 
SQL: " + originStmt + ", because unknown reason: ", e); ctx.getState().setError("Unexpected exception: " + e.getMessage()); ctx.getState().setErrType(QueryState.ErrType.INTERNAL_ERR); } finally { Tracers.close(); } if (executor != null) { auditAfterExec(originStmt, executor.getParsedStmt(), executor.getQueryStatisticsForAuditLog()); } else { auditAfterExec(originStmt, null, null); } addFinishedQueryDetail(); } private void handleFieldList() throws IOException { String tableName = new String(MysqlProto.readNulTerminateString(packetBuf), StandardCharsets.UTF_8); if (Strings.isNullOrEmpty(tableName)) { ctx.getState().setError("Empty tableName"); return; } Database db = ctx.getGlobalStateMgr().getMetadataMgr().getDb(ctx.getCurrentCatalog(), ctx.getDatabase()); if (db == null) { ctx.getState().setError("Unknown database(" + ctx.getDatabase() + ")"); return; } Locker locker = new Locker(); locker.lockDatabase(db, LockType.READ); try { Table table = ctx.getGlobalStateMgr().getMetadataMgr().getTable( ctx.getCurrentCatalog(), ctx.getDatabase(), tableName); if (table == null) { ctx.getState().setError("Unknown table(" + tableName + ")"); return; } MysqlSerializer serializer = ctx.getSerializer(); MysqlChannel channel = ctx.getMysqlChannel(); List<Column> baseSchema = table.getBaseSchema(); for (Column column : baseSchema) { serializer.reset(); serializer.writeField(db.getOriginName(), table.getName(), column, true); channel.sendOnePacket(serializer.toByteBuffer()); } } catch (StarRocksConnectorException e) { LOG.error("errors happened when getting table {}", tableName, e); } finally { locker.unLockDatabase(db, LockType.READ); } ctx.getState().setEof(); } private void handleStmtReset() { ctx.getState().setOk(); } private void handleStmtClose() { int stmtId = packetBuf.getInt(); ctx.removePreparedStmt(String.valueOf(stmtId)); ctx.getState().setStateType(QueryState.MysqlStateType.NOOP); } private static boolean isNull(byte[] bitmap, int position) { return (bitmap[position / 8] & (0xff & (1 << (position & 7)))) != 0; } private void dispatch() throws IOException { int code = packetBuf.get(); MysqlCommand command = MysqlCommand.fromCode(code); if (command == null) { ErrorReport.report(ErrorCode.ERR_UNKNOWN_COM_ERROR); ctx.getState().setError("Unknown command(" + command + ")"); LOG.debug("Unknown MySQL protocol command"); return; } ctx.setCommand(command); ctx.setStartTime(); ctx.setResourceGroup(null); ctx.setErrorCode(""); switch (command) { case COM_INIT_DB: handleInitDb(); break; case COM_QUIT: handleQuit(); break; case COM_QUERY: case COM_STMT_PREPARE: handleQuery(); ctx.setStartTime(); break; case COM_STMT_RESET: handleStmtReset(); break; case COM_STMT_CLOSE: handleStmtClose(); break; case COM_FIELD_LIST: handleFieldList(); break; case COM_CHANGE_USER: handleChangeUser(); break; case COM_RESET_CONNECTION: handleResetConnection(); break; case COM_PING: handlePing(); break; case COM_STMT_EXECUTE: handleExecute(); break; default: ctx.getState().setError("Unsupported command(" + command + ")"); LOG.debug("Unsupported command: {}", command); break; } } private ByteBuffer getResultPacket() { MysqlPacket packet = ctx.getState().toResponsePacket(); if (packet == null) { return null; } MysqlSerializer serializer = ctx.getSerializer(); serializer.reset(); packet.writeTo(serializer); return serializer.toByteBuffer(); } private void finalizeCommand() throws IOException { ByteBuffer packet = null; if (executor != null && executor.isForwardToLeader()) { if (ctx.getState().getStateType() == 
QueryState.MysqlStateType.ERR) { packet = executor.getOutputPacket(); if (packet == null) { packet = getResultPacket(); if (packet == null) { LOG.debug("packet == null"); return; } } } else { ShowResultSet resultSet = executor.getShowResultSet(); if (resultSet == null) { if (executor.sendResultToChannel(ctx.getMysqlChannel())) { packet = getResultPacket(); } else { packet = executor.getOutputPacket(); } } else { executor.sendShowResult(resultSet); packet = getResultPacket(); if (packet == null) { LOG.debug("packet == null"); return; } } } } else { packet = getResultPacket(); if (packet == null) { LOG.debug("packet == null"); return; } } MysqlChannel channel = ctx.getMysqlChannel(); channel.sendAndFlush(packet); if (ctx.getCommand() == MysqlCommand.COM_QUERY) { ctx.setLastQueryId(ctx.queryId); ctx.setQueryId(null); } } public TMasterOpResult proxyExecute(TMasterOpRequest request) { ctx.setCurrentCatalog(request.catalog); if (ctx.getCurrentCatalog() == null) { TMasterOpResult result = new TMasterOpResult(); ctx.getState().setError( "Missing current catalog. You need to upgrade this Frontend to the same version as Leader Frontend."); result.setMaxJournalId(GlobalStateMgr.getCurrentState().getMaxJournalId()); result.setPacket(getResultPacket()); return result; } ctx.setDatabase(request.db); ctx.setQualifiedUser(request.user); ctx.setGlobalStateMgr(GlobalStateMgr.getCurrentState()); ctx.getState().reset(); if (request.isSetResourceInfo()) { ctx.getSessionVariable().setResourceGroup(request.getResourceInfo().getGroup()); } if (request.isSetUser_ip()) { ctx.setRemoteIP(request.getUser_ip()); } if (request.isSetTime_zone()) { ctx.getSessionVariable().setTimeZone(request.getTime_zone()); } if (request.isSetStmt_id()) { ctx.setForwardedStmtId(request.getStmt_id()); } if (request.isSetSqlMode()) { ctx.getSessionVariable().setSqlMode(request.sqlMode); } if (request.isSetEnableStrictMode()) { ctx.getSessionVariable().setEnableInsertStrict(request.enableStrictMode); } if (request.isSetCurrent_user_ident()) { UserIdentity currentUserIdentity = UserIdentity.fromThrift(request.getCurrent_user_ident()); ctx.setCurrentUserIdentity(currentUserIdentity); } if (request.isSetUser_roles()) { List<Long> roleIds = request.getUser_roles().getRole_id_list(); ctx.setCurrentRoleIds(new HashSet<>(roleIds)); } else { ctx.setCurrentRoleIds(new HashSet<>()); } if (request.isSetIsLastStmt()) { ctx.setIsLastStmt(request.isIsLastStmt()); } else { ctx.setIsLastStmt(true); } if (request.isSetQuery_options()) { TQueryOptions queryOptions = request.getQuery_options(); if (queryOptions.isSetMem_limit()) { ctx.getSessionVariable().setMaxExecMemByte(queryOptions.getMem_limit()); } if (queryOptions.isSetQuery_timeout()) { ctx.getSessionVariable().setQueryTimeoutS(queryOptions.getQuery_timeout()); } if (queryOptions.isSetLoad_mem_limit()) { ctx.getSessionVariable().setLoadMemLimit(queryOptions.getLoad_mem_limit()); } if (queryOptions.isSetMax_scan_key_num()) { ctx.getSessionVariable().setMaxScanKeyNum(queryOptions.getMax_scan_key_num()); } if (queryOptions.isSetMax_pushdown_conditions_per_column()) { ctx.getSessionVariable().setMaxPushdownConditionsPerColumn( queryOptions.getMax_pushdown_conditions_per_column()); } } else { if (request.isSetExecMemLimit()) { ctx.getSessionVariable().setMaxExecMemByte(request.getExecMemLimit()); } if (request.isSetQueryTimeout()) { ctx.getSessionVariable().setQueryTimeoutS(request.getQueryTimeout()); } if (request.isSetLoadMemLimit()) { ctx.getSessionVariable().setLoadMemLimit(request.loadMemLimit); } 
} if (request.isSetQueryId()) { ctx.setQueryId(UUIDUtil.fromTUniqueid(request.getQueryId())); } if (request.isSetForward_times()) { ctx.setForwardTimes(request.getForward_times()); } ctx.setThreadLocalInfo(); if (ctx.getCurrentUserIdentity() == null) { TMasterOpResult result = new TMasterOpResult(); ctx.getState().setError( "Missing current user identity. You need to upgrade this Frontend to the same version as Leader Frontend."); result.setMaxJournalId(GlobalStateMgr.getCurrentState().getMaxJournalId()); result.setPacket(getResultPacket()); return result; } StmtExecutor executor = null; try { if (request.isSetModified_variables_sql()) { LOG.info("Set session variables first: {}", request.modified_variables_sql); StatementBase statement = SqlParser.parseSingleStatement(request.modified_variables_sql, ctx.getSessionVariable().getSqlMode()); executor = new StmtExecutor(ctx, statement); executor.setProxy(); executor.execute(); } int idx = request.isSetStmtIdx() ? request.getStmtIdx() : 0; List<StatementBase> stmts = SqlParser.parse(request.getSql(), ctx.getSessionVariable()); StatementBase statement = stmts.get(idx); executor = new StmtExecutor(ctx, statement); executor.setProxy(); executor.execute(); } catch (IOException e) { LOG.warn("Process one query failed because IOException: ", e); ctx.getState().setError("StarRocks process failed: " + e.getMessage()); } catch (Throwable e) { LOG.warn("Process one query failed because unknown reason: ", e); ctx.getState().setError("Unexpected exception: " + e.getMessage()); } if (executor != null && executor.getIsForwardToLeaderOrInit(false)) { return executor.getLeaderOpExecutor().getResult(); } TMasterOpResult result = new TMasterOpResult(); result.setMaxJournalId(GlobalStateMgr.getCurrentState().getMaxJournalId()); if (!ctx.getIsLastStmt() && ctx.getState().getStateType() != QueryState.MysqlStateType.ERR) { ctx.getState().serverStatus |= MysqlServerStatusFlag.SERVER_MORE_RESULTS_EXISTS; } result.setPacket(getResultPacket()); result.setState(ctx.getState().getStateType().toString()); if (executor != null) { if (executor.getProxyResultSet() != null) { result.setResultSet(executor.getProxyResultSet().tothrift()); } else if (executor.getProxyResultBuffer() != null) { result.setChannelBufferList(executor.getProxyResultBuffer()); } String resourceGroupName = ctx.getAuditEventBuilder().build().resourceGroup; if (StringUtils.isNotEmpty(resourceGroupName)) { result.setResource_group_name(resourceGroupName); } PQueryStatistics audit = executor.getQueryStatisticsForAuditLog(); if (audit != null) { result.setAudit_statistics(AuditStatisticsUtil.toThrift(audit)); } } return result; } public void processOnce() throws IOException { ctx.getState().reset(); executor = null; final MysqlChannel channel = ctx.getMysqlChannel(); channel.setSequenceId(0); try { packetBuf = channel.fetchOnePacket(); if (packetBuf == null) { throw new RpcException(ctx.getRemoteIP(), "Error happened when receiving packet."); } } catch (AsynchronousCloseException e) { return; } dispatch(); finalizeCommand(); ctx.setCommand(MysqlCommand.COM_SLEEP); } public void loop() { while (!ctx.isKilled()) { try { processOnce(); } catch (RpcException rpce) { LOG.debug("Exception happened in one session(" + ctx + ").", rpce); ctx.setKilled(); break; } catch (Exception e) { LOG.warn("Exception happened in one seesion(" + ctx + ").", e); ctx.setKilled(); break; } } } }
How do followers recover the default warehouse?
private void transferToLeader() { FrontendNodeType oldType = feType; if (replayer != null) { replayer.setStop(); try { replayer.join(); } catch (InterruptedException e) { LOG.warn("got exception when stopping the replayer thread", e); } replayer = null; } isReady.set(false); try { journal.open(); if (!haProtocol.fencing()) { throw new Exception("fencing failed. will exit"); } long maxJournalId = journal.getMaxJournalId(); replayJournal(maxJournalId); nodeMgr.checkCurrentNodeExist(); journalWriter.init(maxJournalId); } catch (Exception e) { LOG.error("failed to init journal after transfer to leader! will exit", e); System.exit(-1); } journalWriter.startDaemon(); feType = FrontendNodeType.LEADER; try { int starrocksMetaVersion = MetaContext.get().getStarRocksMetaVersion(); if (starrocksMetaVersion < FeConstants.STARROCKS_META_VERSION) { editLog.logMetaVersion(new MetaVersion(FeConstants.STARROCKS_META_VERSION)); MetaContext.get().setStarRocksMetaVersion(FeConstants.STARROCKS_META_VERSION); } if (nodeMgr.isFirstTimeStartUp()) { Frontend self = nodeMgr.getMySelf(); Preconditions.checkNotNull(self); editLog.logAddFirstFrontend(self); } if (!isDefaultClusterCreated) { initDefaultCluster(); } nodeMgr.setLeaderInfo(); if (USING_NEW_PRIVILEGE) { if (needUpgradedToNewPrivilege()) { reInitializeNewPrivilegeOnUpgrade(); AuthUpgrader upgrader = new AuthUpgrader(auth, authenticationManager, authorizationManager, this); upgrader.upgradeAsLeader(); this.domainResolver.setAuthenticationManager(authenticationManager); } LOG.info("set usingNewPrivilege to true after transfer to leader"); usingNewPrivilege.set(true); auth = null; } startLeaderOnlyDaemonThreads(); startAllNodeTypeDaemonThreads(); insertOverwriteJobManager.cancelRunningJobs(); if (!isDefaultWarehouseCreated) { initDefaultWarehouse(); } updateDefaultWarehouse(); MetricRepo.init(); isReady.set(true); String msg = "leader finished to replay journal, can write now."; Util.stdoutWithTime(msg); LOG.info(msg); ThreadPoolManager.registerAllThreadPoolMetric(); if (nodeMgr.isFirstTimeStartUp()) { VariableMgr.setSystemVariable(VariableMgr.getDefaultSessionVariable(), new SystemVariable(SetType.GLOBAL, SessionVariable.ENABLE_ADAPTIVE_SINK_DOP, LiteralExpr.create("true", Type.BOOLEAN)), false); } } catch (UserException e) { LOG.warn("Failed to set ENABLE_ADAPTIVE_SINK_DOP", e); } catch (Throwable t) { LOG.warn("transfer to leader failed with error", t); feType = oldType; throw t; } }
updateDefaultWarehouse();
private void transferToLeader() { FrontendNodeType oldType = feType; if (replayer != null) { replayer.setStop(); try { replayer.join(); } catch (InterruptedException e) { LOG.warn("got exception when stopping the replayer thread", e); } replayer = null; } isReady.set(false); try { journal.open(); if (!haProtocol.fencing()) { throw new Exception("fencing failed. will exit"); } long maxJournalId = journal.getMaxJournalId(); replayJournal(maxJournalId); nodeMgr.checkCurrentNodeExist(); journalWriter.init(maxJournalId); } catch (Exception e) { LOG.error("failed to init journal after transfer to leader! will exit", e); System.exit(-1); } journalWriter.startDaemon(); feType = FrontendNodeType.LEADER; try { int starrocksMetaVersion = MetaContext.get().getStarRocksMetaVersion(); if (starrocksMetaVersion < FeConstants.STARROCKS_META_VERSION) { editLog.logMetaVersion(new MetaVersion(FeConstants.STARROCKS_META_VERSION)); MetaContext.get().setStarRocksMetaVersion(FeConstants.STARROCKS_META_VERSION); } if (nodeMgr.isFirstTimeStartUp()) { Frontend self = nodeMgr.getMySelf(); Preconditions.checkNotNull(self); editLog.logAddFirstFrontend(self); } if (!isDefaultClusterCreated) { initDefaultCluster(); } nodeMgr.setLeaderInfo(); if (USING_NEW_PRIVILEGE) { if (needUpgradedToNewPrivilege()) { reInitializeNewPrivilegeOnUpgrade(); AuthUpgrader upgrader = new AuthUpgrader(auth, authenticationManager, authorizationManager, this); upgrader.upgradeAsLeader(); this.domainResolver.setAuthenticationManager(authenticationManager); } LOG.info("set usingNewPrivilege to true after transfer to leader"); usingNewPrivilege.set(true); auth = null; } startLeaderOnlyDaemonThreads(); startAllNodeTypeDaemonThreads(); insertOverwriteJobManager.cancelRunningJobs(); if (!isDefaultWarehouseCreated) { initDefaultWarehouse(); } updateDefaultWarehouse(); MetricRepo.init(); isReady.set(true); String msg = "leader finished to replay journal, can write now."; Util.stdoutWithTime(msg); LOG.info(msg); ThreadPoolManager.registerAllThreadPoolMetric(); if (nodeMgr.isFirstTimeStartUp()) { VariableMgr.setSystemVariable(VariableMgr.getDefaultSessionVariable(), new SystemVariable(SetType.GLOBAL, SessionVariable.ENABLE_ADAPTIVE_SINK_DOP, LiteralExpr.create("true", Type.BOOLEAN)), false); } } catch (UserException e) { LOG.warn("Failed to set ENABLE_ADAPTIVE_SINK_DOP", e); } catch (Throwable t) { LOG.warn("transfer to leader failed with error", t); feType = oldType; throw t; } }
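The review question above points at a real asymmetry: transferToLeader() runs only on the node that becomes leader, while followers rebuild their state from image load plus journal replay. For followers to ever see the default warehouse, its creation therefore has to be written to the edit log and handled on the replay path, not merely applied in memory on the leader. A hypothetical, self-contained sketch of that replay pattern follows; none of these names come from the StarRocks sources.

    import java.util.HashMap;
    import java.util.Map;

    public class ReplayDemo {
        static final short OP_CREATE_WAREHOUSE = 1;

        // Illustrative stand-in for an edit-log entry (Java 16+ record).
        record JournalEntry(short opCode, String payload) {}

        static final Map<String, String> warehouses = new HashMap<>();

        static void replay(JournalEntry entry) {
            if (entry.opCode() == OP_CREATE_WAREHOUSE) {
                // Followers rebuild the warehouse map purely from entries like this;
                // a bare in-memory call on the leader would never reach them.
                warehouses.put(entry.payload(), "created");
            }
        }

        public static void main(String[] args) {
            // The leader journals this entry; every follower replays it on catch-up.
            replay(new JournalEntry(OP_CREATE_WAREHOUSE, "default_warehouse"));
            System.out.println(warehouses); // {default_warehouse=created}
        }
    }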
class SingletonHolder { private static final GlobalStateMgr INSTANCE = new GlobalStateMgr(); }
class SingletonHolder { private static final GlobalStateMgr INSTANCE = new GlobalStateMgr(); }
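The SingletonHolder above is the standard initialization-on-demand holder idiom: the JVM runs a class's static initializer exactly once, on first use, under the class-loading lock, so the singleton is lazy and thread-safe with no explicit synchronization. A generic illustration follows (Service is a stand-in type, not from the sources).

    public class Service {
        private Service() {}

        private static class Holder {
            // Initialized only when getInstance() first touches Holder.
            private static final Service INSTANCE = new Service();
        }

        public static Service getInstance() {
            return Holder.INSTANCE;
        }
    }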
How about changing the usage of `CompletableFuture` to the pattern in [1]? Returning an already-completed future avoids the unnecessary async scheduling for results that are computed synchronously. [1] https://github.com/ballerina-platform/ballerina-lang/blob/master/language-server/modules/langserver-core/src/main/java/org/ballerinalang/langserver/BallerinaTextDocumentService.java#L138
public CompletableFuture<VariablesResponse> variables(VariablesArguments args) { VariablesResponse variablesResponse = new VariablesResponse(); try { Integer frameId = scopeIdToFrameIds.get(args.getVariablesReference()); if (frameId == null) { variablesResponse.setVariables(computeChildVariables(args)); return CompletableFuture.completedFuture(variablesResponse); } StackFrameProxyImpl stackFrame = stackFrames.get(Math.abs(frameId)); if (stackFrame == null) { variablesResponse.setVariables(new Variable[0]); return CompletableFuture.completedFuture(variablesResponse); } suspendedContext = new SuspendedContext(context, activeThread, stackFrame); if (frameId < 0) { variablesResponse.setVariables(computeGlobalScopeVariables(args)); } else { variablesResponse.setVariables(computeLocalScopeVariables(args)); } return CompletableFuture.completedFuture(variablesResponse); } catch (Exception e) { LOGGER.error(e.getMessage(), e); variablesResponse.setVariables(new Variable[0]); return CompletableFuture.completedFuture(variablesResponse); } }
return CompletableFuture.completedFuture(variablesResponse);
public CompletableFuture<VariablesResponse> variables(VariablesArguments args) { VariablesResponse variablesResponse = new VariablesResponse(); try { Integer frameId = scopeIdToFrameIds.get(args.getVariablesReference()); if (frameId == null) { variablesResponse.setVariables(computeChildVariables(args)); return CompletableFuture.completedFuture(variablesResponse); } StackFrameProxyImpl stackFrame = stackFrames.get(Math.abs(frameId)); if (stackFrame == null) { variablesResponse.setVariables(new Variable[0]); return CompletableFuture.completedFuture(variablesResponse); } suspendedContext = new SuspendedContext(context, activeThread, stackFrame); if (frameId < 0) { variablesResponse.setVariables(computeGlobalScopeVariables(args)); } else { variablesResponse.setVariables(computeLocalScopeVariables(args)); } return CompletableFuture.completedFuture(variablesResponse); } catch (Exception e) { LOGGER.error(e.getMessage(), e); variablesResponse.setVariables(new Variable[0]); return CompletableFuture.completedFuture(variablesResponse); } }
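The reviewer's suggestion above boils down to: when the response is already computed on the caller's thread, wrap it with CompletableFuture.completedFuture() to satisfy the async DAP interface without scheduling anything on a thread pool, and reserve supplyAsync for genuinely deferred work. A small self-contained comparison follows; the names are illustrative.

    import java.util.concurrent.CompletableFuture;

    public class CompletedFutureDemo {
        // Result already computed synchronously: completedFuture() returns an
        // already-done future, so join() never blocks and no task is spawned.
        static CompletableFuture<String> alreadyComputed() {
            String response = "variables-response"; // synchronous work
            return CompletableFuture.completedFuture(response);
        }

        // Genuinely deferred work (like the evaluate() request) still belongs
        // on supplyAsync, which runs the supplier on the common ForkJoinPool.
        static CompletableFuture<String> deferred() {
            return CompletableFuture.supplyAsync(() -> "evaluated-later");
        }

        public static void main(String[] args) {
            System.out.println(alreadyComputed().join()); // completes immediately
            System.out.println(deferred().join());        // completes after async run
        }
    }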
class JBallerinaDebugServer implements IDebugProtocolServer { private IDebugProtocolClient client; private ClientConfigHolder clientConfigHolder; private DebugExecutionManager executionManager; private JDIEventProcessor eventProcessor; private final ExecutionContext context; private ThreadReferenceProxyImpl activeThread; private SuspendedContext suspendedContext; private DebugOutputLogger outputLogger; private final AtomicInteger nextVarReference = new AtomicInteger(); private final Map<Integer, StackFrameProxyImpl> stackFrames = new HashMap<>(); private final Map<Long, StackFrame[]> threadStackTraces = new HashMap<>(); private final Map<Integer, Integer> scopeIdToFrameIds = new HashMap<>(); private final Map<Integer, Integer> variableToStackFrames = new ConcurrentHashMap<>(); private final Map<Integer, BCompoundVariable> loadedCompoundVariables = new ConcurrentHashMap<>(); private static final Logger LOGGER = LoggerFactory.getLogger(JBallerinaDebugServer.class); private static final String SCOPE_NAME_LOCAL = "Local"; private static final String SCOPE_NAME_GLOBAL = "Global"; private static final String VALUE_UNKNOWN = "unknown"; private static final String EVAL_ARGS_CONTEXT_VARIABLES = "variables"; private static final String COMPILATION_ERROR_MESSAGE = "error: compilation contains errors"; public JBallerinaDebugServer() { context = new ExecutionContext(this); } public ExecutionContext getContext() { return context; } ClientConfigHolder getClientConfigHolder() { return clientConfigHolder; } public DebugOutputLogger getOutputLogger() { return outputLogger; } @Override public CompletableFuture<Capabilities> initialize(InitializeRequestArguments args) { Capabilities capabilities = new Capabilities(); capabilities.setSupportsConfigurationDoneRequest(true); capabilities.setSupportsTerminateRequest(true); capabilities.setSupportTerminateDebuggee(true); capabilities.setSupportsConditionalBreakpoints(true); capabilities.setSupportsLogPoints(true); capabilities.setSupportsCompletionsRequest(false); capabilities.setSupportsRestartRequest(false); capabilities.setSupportsHitConditionalBreakpoints(false); capabilities.setSupportsModulesRequest(false); capabilities.setSupportsStepBack(false); capabilities.setSupportsTerminateThreadsRequest(false); capabilities.setSupportsFunctionBreakpoints(false); capabilities.setSupportsFunctionBreakpoints(false); eventProcessor = new JDIEventProcessor(context); outputLogger = new DebugOutputLogger(client); context.setClient(client); client.initialized(); return CompletableFuture.completedFuture(capabilities); } @Override public CompletableFuture<SetBreakpointsResponse> setBreakpoints(SetBreakpointsArguments args) { BalBreakpoint[] balBreakpoints = Arrays.stream(args.getBreakpoints()) .map((SourceBreakpoint sourceBreakpoint) -> toBalBreakpoint(sourceBreakpoint, args.getSource())) .toArray(BalBreakpoint[]::new); Breakpoint[] breakpoints = Arrays.stream(balBreakpoints) .map(BalBreakpoint::getAsDAPBreakpoint) .toArray(Breakpoint[]::new); Map<Integer, BalBreakpoint> breakpointsMap = new HashMap<>(); for (BalBreakpoint bp : balBreakpoints) { breakpointsMap.put(bp.getLine(), bp); } SetBreakpointsResponse breakpointsResponse = new SetBreakpointsResponse(); breakpointsResponse.setBreakpoints(breakpoints); String sourcePath = args.getSource().getPath(); eventProcessor.setBreakpoints(sourcePath, breakpointsMap); return CompletableFuture.completedFuture(breakpointsResponse); } @Override public CompletableFuture<Void> configurationDone(ConfigurationDoneArguments args) { return 
CompletableFuture.completedFuture(null); } @Override public CompletableFuture<Void> launch(Map<String, Object> args) { try { clearState(); context.setDebugMode(ExecutionContext.DebugMode.LAUNCH); clientConfigHolder = new ClientLaunchConfigHolder(args); context.setSourceProject(loadProject(clientConfigHolder.getSourcePath())); String sourceProjectRoot = context.getSourceProjectRoot(); BProgramRunner programRunner = context.getSourceProject() instanceof SingleFileProject ? new BFileRunner((ClientLaunchConfigHolder) clientConfigHolder, sourceProjectRoot) : new BPackageRunner((ClientLaunchConfigHolder) clientConfigHolder, sourceProjectRoot); context.setLaunchedProcess(programRunner.start()); startListeningToProgramOutput(); return CompletableFuture.completedFuture(null); } catch (Exception e) { outputLogger.sendErrorOutput("Failed to launch the ballerina program due to: " + e); return CompletableFuture.completedFuture(null); } } @Override public CompletableFuture<Void> attach(Map<String, Object> args) { try { clearState(); context.setDebugMode(ExecutionContext.DebugMode.ATTACH); clientConfigHolder = new ClientAttachConfigHolder(args); context.setSourceProject(loadProject(clientConfigHolder.getSourcePath())); ClientAttachConfigHolder configHolder = (ClientAttachConfigHolder) clientConfigHolder; String hostName = configHolder.getHostName().orElse(""); int portName = configHolder.getDebuggePort(); attachToRemoteVM(hostName, portName); } catch (Exception e) { String host = ((ClientAttachConfigHolder) clientConfigHolder).getHostName().orElse(LOCAL_HOST); String portName; try { portName = Integer.toString(clientConfigHolder.getDebuggePort()); } catch (ClientConfigurationException clientConfigurationException) { portName = VALUE_UNKNOWN; } LOGGER.error(e.getMessage()); outputLogger.sendErrorOutput(String.format("Failed to attach to the target VM, address: '%s:%s'.", host, portName)); } return CompletableFuture.completedFuture(null); } @Override public CompletableFuture<ThreadsResponse> threads() { ThreadsResponse threadsResponse = new ThreadsResponse(); if (eventProcessor == null) { return CompletableFuture.completedFuture(threadsResponse); } Map<Integer, ThreadReferenceProxyImpl> threadsMap = getActiveStrandThreads(); if (threadsMap == null) { return CompletableFuture.completedFuture(threadsResponse); } Thread[] threads = new Thread[threadsMap.size()]; threadsMap.values().stream().map(this::toDapThread).collect(Collectors.toList()).toArray(threads); threadsResponse.setThreads(threads); return CompletableFuture.completedFuture(threadsResponse); } @Override public CompletableFuture<Void> pause(PauseArguments args) { VirtualMachineProxyImpl debuggeeVM = context.getDebuggeeVM(); if (!debuggeeVM.canBeModified()) { getOutputLogger().sendConsoleOutput("Failed to suspend the remote VM due to: pause requests are not " + "supported on read-only VMs"); return CompletableFuture.completedFuture(null); } debuggeeVM.suspend(); eventProcessor.notifyStopEvent(StoppedEventArgumentsReason.PAUSE, args.getThreadId()); return CompletableFuture.completedFuture(null); } @Override public CompletableFuture<StackTraceResponse> stackTrace(StackTraceArguments args) { StackTraceResponse stackTraceResponse = new StackTraceResponse(); try { activeThread = getAllThreads().get(args.getThreadId()); if (threadStackTraces.containsKey(activeThread.uniqueID())) { stackTraceResponse.setStackFrames(threadStackTraces.get(activeThread.uniqueID())); } else { StackFrame[] validFrames = activeThread.frames().stream() .map(this::toDapStackFrame) 
.filter(JBallerinaDebugServer::isValidFrame) .toArray(StackFrame[]::new); stackTraceResponse.setStackFrames(validFrames); threadStackTraces.put(activeThread.uniqueID(), validFrames); } return CompletableFuture.completedFuture(stackTraceResponse); } catch (Exception e) { LOGGER.error(e.getMessage(), e); stackTraceResponse.setStackFrames(new StackFrame[0]); return CompletableFuture.completedFuture(stackTraceResponse); } } @Override public CompletableFuture<ScopesResponse> scopes(ScopesArguments args) { Scope localScope = new Scope(); localScope.setName(SCOPE_NAME_LOCAL); scopeIdToFrameIds.put(nextVarReference.get(), args.getFrameId()); localScope.setVariablesReference(nextVarReference.getAndIncrement()); Scope globalScope = new Scope(); globalScope.setName(SCOPE_NAME_GLOBAL); scopeIdToFrameIds.put(nextVarReference.get(), -args.getFrameId()); globalScope.setVariablesReference(nextVarReference.getAndIncrement()); Scope[] scopes = {localScope, globalScope}; ScopesResponse scopesResponse = new ScopesResponse(); scopesResponse.setScopes(scopes); return CompletableFuture.completedFuture(scopesResponse); } @Override @Override public CompletableFuture<SourceResponse> source(SourceArguments args) { return CompletableFuture.completedFuture(null); } @Override public CompletableFuture<ContinueResponse> continue_(ContinueArguments args) { prepareFor(DebugInstruction.CONTINUE); context.getDebuggeeVM().resume(); ContinueResponse continueResponse = new ContinueResponse(); continueResponse.setAllThreadsContinued(true); return CompletableFuture.completedFuture(continueResponse); } @Override public CompletableFuture<Void> next(NextArguments args) { prepareFor(DebugInstruction.STEP_OVER); eventProcessor.sendStepRequest(args.getThreadId(), StepRequest.STEP_OVER); return CompletableFuture.completedFuture(null); } @Override public CompletableFuture<Void> stepIn(StepInArguments args) { prepareFor(DebugInstruction.STEP_IN); eventProcessor.sendStepRequest(args.getThreadId(), StepRequest.STEP_INTO); return CompletableFuture.completedFuture(null); } @Override public CompletableFuture<Void> stepOut(StepOutArguments args) { stepOut(args.getThreadId()); return CompletableFuture.completedFuture(null); } void stepOut(int threadId) { prepareFor(DebugInstruction.STEP_OUT); eventProcessor.sendStepRequest(threadId, StepRequest.STEP_OUT); } @Override public CompletableFuture<Void> setExceptionBreakpoints(SetExceptionBreakpointsArguments args) { return CompletableFuture.completedFuture(null); } @Override public CompletableFuture<EvaluateResponse> evaluate(EvaluateArguments args) { if (executionManager == null || !executionManager.isActive()) { context.getOutputLogger().sendErrorOutput(EvaluationExceptionKind.PREFIX + "Debug server is not " + "connected to any program VM."); return CompletableFuture.completedFuture(new EvaluateResponse()); } if (args.getFrameId() == null) { context.getOutputLogger().sendErrorOutput(EvaluationExceptionKind.PREFIX + "Remote VM is not suspended " + "and still in running state."); return CompletableFuture.completedFuture(new EvaluateResponse()); } if (args.getContext() != null && args.getContext().equals(EVAL_ARGS_CONTEXT_VARIABLES)) { EvaluateResponse response = new EvaluateResponse(); response.setResult(args.getExpression()); return CompletableFuture.completedFuture(response); } return CompletableFuture.supplyAsync(() -> { try { StackFrameProxyImpl frame = stackFrames.get(args.getFrameId()); SuspendedContext suspendedContext = new SuspendedContext(context, activeThread, frame); EvaluationContext 
evaluationContext = new EvaluationContext(suspendedContext); DebugExpressionEvaluator evaluator = new DebugExpressionEvaluator(evaluationContext); evaluator.setExpression(args.getExpression()); BVariable evaluationResult = evaluator.evaluate().getBVariable(); return constructEvaluateResponse(args, evaluationResult); } catch (EvaluationException e) { context.getOutputLogger().sendErrorOutput(e.getMessage()); return new EvaluateResponse(); } catch (Exception e) { context.getOutputLogger().sendErrorOutput(EvaluationExceptionKind.PREFIX + "internal error"); return new EvaluateResponse(); } }); } @Override public CompletableFuture<SetFunctionBreakpointsResponse> setFunctionBreakpoints( SetFunctionBreakpointsArguments args) { return CompletableFuture.completedFuture(null); } @Override public CompletableFuture<Void> disconnect(DisconnectArguments args) { context.setTerminateRequestReceived(true); boolean terminateDebuggee = Objects.requireNonNullElse(args.getTerminateDebuggee(), true); terminateServer(terminateDebuggee); return CompletableFuture.completedFuture(null); } @Override public CompletableFuture<Void> terminate(TerminateArguments args) { context.setTerminateRequestReceived(true); terminateServer(true); return CompletableFuture.completedFuture(null); } void terminateServer(boolean terminateDebuggee) { if (context.getLaunchedProcess().isPresent() && context.getLaunchedProcess().get().isAlive()) { killProcessWithDescendants(context.getLaunchedProcess().get()); } if (terminateDebuggee && context.getDebuggeeVM() != null) { int exitCode = 0; if (context.getDebuggeeVM().process() != null) { exitCode = killProcessWithDescendants(context.getDebuggeeVM().process()); } try { context.getDebuggeeVM().exit(exitCode); } catch (Exception ignored) { } } if (!context.isTerminateRequestReceived()) { ExitedEventArguments exitedEventArguments = new ExitedEventArguments(); exitedEventArguments.setExitCode(0); context.getClient().exited(exitedEventArguments); } if (executionManager != null) { String address = (executionManager.getHost().isPresent() && executionManager.getPort().isPresent()) ? executionManager.getHost().get() + ":" + executionManager.getPort().get() : VALUE_UNKNOWN; outputLogger.sendDebugServerOutput(String.format(System.lineSeparator() + "Disconnected from the target " + "VM, address: '%s'", address)); } new java.lang.Thread(() -> { try { java.lang.Thread.sleep(500); } catch (InterruptedException ignored) { } System.exit(0); }).start(); } private static int killProcessWithDescendants(Process parent) { try { parent.descendants().forEach(processHandle -> { boolean successful = processHandle.destroy(); if (!successful) { processHandle.destroyForcibly(); } }); parent.destroyForcibly(); parent.waitFor(); return parent.exitValue(); } catch (InterruptedException ignored) { return 0; } catch (Exception e) { return 1; } } public void connect(IDebugProtocolClient client) { this.client = client; } private synchronized void updateVariableToStackFrameMap(int parent, int child) { if (!variableToStackFrames.containsKey(parent)) { variableToStackFrames.put(child, parent); return; } Integer parentRef; do { parentRef = variableToStackFrames.get(parent); } while (variableToStackFrames.containsKey(parentRef)); variableToStackFrames.put(child, parentRef); } /** * Converts a JDI thread reference to a DAP thread instance. 
* * @param threadReference JDI thread reference * @return the corresponding DAP thread instance */ Thread toDapThread(ThreadReferenceProxyImpl threadReference) { Thread thread = new Thread(); thread.setId((int) threadReference.uniqueID()); thread.setName(threadReference.name()); return thread; } /** * Converts a JDI stack frame instance to a DAP stack frame instance. */ private StackFrame toDapStackFrame(StackFrameProxyImpl stackFrameProxy) { try { if (!isBalStackFrame(stackFrameProxy.getStackFrame())) { return null; } int referenceId = nextVarReference.getAndIncrement(); stackFrames.put(referenceId, stackFrameProxy); BallerinaStackFrame balStackFrame = new BallerinaStackFrame(context, referenceId, stackFrameProxy); return balStackFrame.getAsDAPStackFrame().orElse(null); } catch (JdiProxyException e) { return null; } } private BalBreakpoint toBalBreakpoint(SourceBreakpoint sourceBreakpoint, Source source) { BalBreakpoint breakpoint = new BalBreakpoint(source, sourceBreakpoint.getLine()); breakpoint.setCondition(sourceBreakpoint.getCondition()); breakpoint.setLogMessage(sourceBreakpoint.getLogMessage()); return breakpoint; } /** * Returns a map of all currently running threads in the remote VM, against their unique ID. * <p> * Thread objects that have not yet been started (see {@link java.lang.Thread#start()}) * and thread objects that have completed their execution are not included in the returned list. */ Map<Integer, ThreadReferenceProxyImpl> getAllThreads() { if (context.getDebuggeeVM() == null) { return null; } Collection<ThreadReference> threadReferences = context.getDebuggeeVM().getVirtualMachine().allThreads(); Map<Integer, ThreadReferenceProxyImpl> threadsMap = new HashMap<>(); for (ThreadReference threadReference : threadReferences) { threadsMap.put((int) threadReference.uniqueID(), new ThreadReferenceProxyImpl(context.getDebuggeeVM(), threadReference)); } return threadsMap; } /** * Returns a map of thread instances which correspond to an active ballerina strand, against their unique ID. */ Map<Integer, ThreadReferenceProxyImpl> getActiveStrandThreads() { Map<Integer, ThreadReferenceProxyImpl> allThreads = getAllThreads(); if (allThreads == null) { return null; } Map<Integer, ThreadReferenceProxyImpl> balStrandThreads = new HashMap<>(); allThreads.forEach((id, threadProxy) -> { ThreadReference threadReference = threadProxy.getThreadReference(); if (threadReference.status() == ThreadReference.THREAD_STATUS_RUNNING && !threadReference.name().equals("Reference Handler") && !threadReference.name().equals("Signal Dispatcher") && threadReference.isSuspended() && isBalStrand(threadReference) ) { balStrandThreads.put(id, new ThreadReferenceProxyImpl(context.getDebuggeeVM(), threadReference)); } }); return balStrandThreads; } /** * Validates whether the given DAP thread reference represents a ballerina strand. * <p> * * @param threadReference DAP thread reference * @return true if the given DAP thread reference represents a ballerina strand. */ private static boolean isBalStrand(ThreadReference threadReference) { try { return isBalStackFrame(threadReference.frames().get(0)); } catch (Exception e) { return false; } } /** * Validates whether the given DAP stack frame represents a ballerina call stack frame. * * @param frame DAP stack frame * @return true if the given DAP stack frame represents a ballerina call stack frame.
*/ static boolean isBalStackFrame(com.sun.jdi.StackFrame frame) { try { return frame.location().sourceName().endsWith(BAL_FILE_EXT); } catch (Exception e) { return false; } } /** * Validates a given ballerina stack frame for its source information. * * @param stackFrame ballerina stack frame * @return true if it's a valid ballerina frame */ static boolean isValidFrame(StackFrame stackFrame) { return stackFrame != null && stackFrame.getSource() != null && stackFrame.getLine() > 0; } /** * Asynchronously listens to remote debuggee stdout + error streams and redirects the output to the client debug * console. */ private void startListeningToProgramOutput() { CompletableFuture.runAsync(() -> { if (context.getLaunchedProcess().isEmpty()) { return; } try (BufferedReader errorStream = context.getErrorStream()) { String line; while ((line = errorStream.readLine()) != null) { outputLogger.sendConsoleOutput(line); if (context.getDebuggeeVM() == null && line.contains(COMPILATION_ERROR_MESSAGE)) { terminateServer(false); } } } catch (IOException ignored) { } }); CompletableFuture.runAsync(() -> { if (context.getLaunchedProcess().isEmpty()) { return; } try (BufferedReader inputStream = context.getInputStream()) { String line; outputLogger.sendDebugServerOutput("Waiting for debug process to start..." + System.lineSeparator() + System.lineSeparator()); while ((line = inputStream.readLine()) != null) { if (line.contains("Listening for transport dt_socket")) { attachToRemoteVM("", clientConfigHolder.getDebuggePort()); } else if (context.getDebuggeeVM() == null && line.contains(COMPILATION_ERROR_MESSAGE)) { terminateServer(false); } outputLogger.sendProgramOutput(line); } } catch (Exception e) { String host = clientConfigHolder instanceof ClientAttachConfigHolder ? ((ClientAttachConfigHolder) clientConfigHolder).getHostName().orElse(LOCAL_HOST) : LOCAL_HOST; String portName; try { portName = Integer.toString(clientConfigHolder.getDebuggePort()); } catch (ClientConfigurationException clientConfigurationException) { portName = VALUE_UNKNOWN; } LOGGER.error(e.getMessage()); outputLogger.sendDebugServerOutput(String.format("Failed to attach to the target VM, address: '%s:%s'.", host, portName)); terminateServer(context.getDebuggeeVM() != null); } }); } /** * Attach to the remote VM using host address and port. * * @param hostName host address * @param portName host port */ private void attachToRemoteVM(String hostName, int portName) throws IOException, IllegalConnectorArgumentsException { executionManager = new DebugExecutionManager(this); VirtualMachine attachedVm = executionManager.attach(hostName, portName); context.setDebuggeeVM(new VirtualMachineProxyImpl(attachedVm)); EventRequestManager erm = context.getEventManager(); ClassPrepareRequest classPrepareRequest = erm.createClassPrepareRequest(); classPrepareRequest.enable(); eventProcessor.startListening(); } /** * Clears previous state information and prepares for the given debug instruction type execution.
*/ private void prepareFor(DebugInstruction instruction) { clearState(); eventProcessor.restoreBreakpoints(instruction); context.setLastInstruction(instruction); } private Variable[] computeGlobalScopeVariables(VariablesArguments requestArgs) throws Exception { int stackFrameReference = requestArgs.getVariablesReference(); String classQName = PackageUtils.getQualifiedClassName(suspendedContext, INIT_CLASS_NAME); List<ReferenceType> cls = suspendedContext.getAttachedVm().classesByName(classQName); if (cls.size() != 1) { return new Variable[0]; } List<CompletableFuture<Variable>> scheduledVariables = new ArrayList<>(); ReferenceType initClassReference = cls.get(0); for (Field field : initClassReference.allFields()) { String fieldName = IdentifierUtils.decodeIdentifier(field.name()); if (!field.isPublic() || !field.isStatic() || fieldName.startsWith(GENERATED_VAR_PREFIX)) { continue; } Value fieldValue = initClassReference.getValue(field); scheduledVariables.add(computeVariableAsync(fieldName, fieldValue, stackFrameReference)); } return scheduledVariables.stream() .map(varFuture -> { try { return varFuture.get(1000, TimeUnit.MILLISECONDS); } catch (Exception ignored) { return null; } }) .filter(Objects::nonNull) .toArray(Variable[]::new); } private Variable[] computeLocalScopeVariables(VariablesArguments args) throws Exception { StackFrameProxyImpl stackFrame = suspendedContext.getFrame(); List<CompletableFuture<Variable>> scheduledVariables = new ArrayList<>(); List<CompletableFuture<Variable[]>> scheduledLambdaMapVariables = new ArrayList<>(); List<LocalVariableProxyImpl> localVariableProxies = stackFrame.visibleVariables(); for (LocalVariableProxyImpl var : localVariableProxies) { String name = var.name(); Value value = stackFrame.getValue(var); if (VariableUtils.isLambdaParamMap(var)) { scheduledLambdaMapVariables.add(fetchLocalVariablesFromMap(args, stackFrame, var)); } else { CompletableFuture<Variable> dapVariable = computeVariableAsync(name, value, args.getVariablesReference()); scheduledVariables.add(dapVariable); } } List<Variable> resolvedVariables = new ArrayList<>(); scheduledVariables.forEach(varFuture -> { try { Variable variable = varFuture.get(1000, TimeUnit.MILLISECONDS); if (variable != null) { resolvedVariables.add(variable); } } catch (Exception ignored) { } }); scheduledLambdaMapVariables.forEach(varFuture -> { try { Variable[] variables = varFuture.get(1000, TimeUnit.MILLISECONDS); if (variables != null) { resolvedVariables.addAll(Arrays.asList(variables)); } } catch (Exception ignored) { } }); return resolvedVariables.toArray(new Variable[0]); } /** * Returns the list of local variables extracted from the given variable map, which contains local variables used * within lambda functions. 
* * @param args variable args * @param stackFrame parent stack frame instance * @param lambdaParamMapVar map variable instance * @return list of local variables extracted from the given variable map */ private CompletableFuture<Variable[]> fetchLocalVariablesFromMap(VariablesArguments args, StackFrameProxyImpl stackFrame, LocalVariableProxyImpl lambdaParamMapVar) { try { Value value = stackFrame.getValue(lambdaParamMapVar); CompletableFuture<Variable> scheduledVariable = computeVariableAsync("lambdaArgMap", value, args.getVariablesReference()); Variable dapVariable = scheduledVariable.get(); if (dapVariable == null || !dapVariable.getType().equals(BVariableType.MAP.getString())) { return new CompletableFuture<>(); } VariablesArguments childVarRequestArgs = new VariablesArguments(); childVarRequestArgs.setVariablesReference(dapVariable.getVariablesReference()); return computeChildVariablesAsync(childVarRequestArgs); } catch (Exception e) { return new CompletableFuture<>(); } } /** * Asynchronously computes a debugger adapter protocol supported variable instance from a given ballerina * runtime value instance. * * @param name variable name * @param value runtime value of the variable * @param stackFrameRef reference ID of the parent stack frame */ private CompletableFuture<Variable> computeVariableAsync(String name, Value value, Integer stackFrameRef) { return CompletableFuture.supplyAsync(() -> { BVariable variable = VariableFactory.getVariable(suspendedContext, name, value); if (variable == null) { return null; } if (variable instanceof BSimpleVariable) { variable.getDapVariable().setVariablesReference(0); } else if (variable instanceof BCompoundVariable) { int variableReference = nextVarReference.getAndIncrement(); variable.getDapVariable().setVariablesReference(variableReference); loadedCompoundVariables.put(variableReference, (BCompoundVariable) variable); updateVariableToStackFrameMap(stackFrameRef, variableReference); } return variable.getDapVariable(); }); } private CompletableFuture<Variable[]> computeChildVariablesAsync(VariablesArguments args) { return CompletableFuture.supplyAsync(() -> computeChildVariables(args)); } private Variable[] computeChildVariables(VariablesArguments args) { BCompoundVariable parentVar = loadedCompoundVariables.get(args.getVariablesReference()); Integer stackFrameId = variableToStackFrames.get(args.getVariablesReference()); if (stackFrameId == null) { return new Variable[0]; } if (parentVar instanceof IndexedCompoundVariable) { int startIndex = (args.getStart() != null) ? args.getStart() : 0; int count = (args.getCount() != null) ?
args.getCount() : 0; Either<Map<String, Value>, List<Value>> childVars = ((IndexedCompoundVariable) parentVar) .getIndexedChildVariables(startIndex, count); if (childVars.isLeft()) { return createVariableArrayFrom(args, childVars.getLeft()); } else if (childVars.isRight()) { return createVariableArrayFrom(args, childVars.getRight()); } return new Variable[0]; } else if (parentVar instanceof NamedCompoundVariable) { Map<String, Value> childVars = ((NamedCompoundVariable) parentVar).getNamedChildVariables(); return createVariableArrayFrom(args, childVars); } return new Variable[0]; } private Variable[] createVariableArrayFrom(VariablesArguments args, Map<String, Value> varMap) { return varMap.entrySet().stream().map(entry -> { String name = entry.getKey(); Value value = entry.getValue(); BVariable variable = VariableFactory.getVariable(suspendedContext, name, value); if (variable == null) { return null; } else if (variable instanceof BSimpleVariable) { variable.getDapVariable().setVariablesReference(0); } else if (variable instanceof BCompoundVariable) { int variableReference = nextVarReference.getAndIncrement(); variable.getDapVariable().setVariablesReference(variableReference); loadedCompoundVariables.put(variableReference, (BCompoundVariable) variable); updateVariableToStackFrameMap(args.getVariablesReference(), variableReference); } return variable.getDapVariable(); }).filter(Objects::nonNull).toArray(Variable[]::new); } private Variable[] createVariableArrayFrom(VariablesArguments args, List<Value> varMap) { int startIndex = (args.getStart() != null) ? args.getStart() : 0; AtomicInteger index = new AtomicInteger(startIndex); return varMap.stream().map(value -> { String name = String.format("[%d]", index.getAndIncrement()); BVariable variable = VariableFactory.getVariable(suspendedContext, name, value); if (variable == null) { return null; } else if (variable instanceof BSimpleVariable) { variable.getDapVariable().setVariablesReference(0); } else if (variable instanceof BCompoundVariable) { int variableReference = nextVarReference.getAndIncrement(); variable.getDapVariable().setVariablesReference(variableReference); loadedCompoundVariables.put(variableReference, (BCompoundVariable) variable); updateVariableToStackFrameMap(args.getVariablesReference(), variableReference); } return variable.getDapVariable(); }).filter(Objects::nonNull).toArray(Variable[]::new); } /** * Creates a {@link EvaluateResponse} from the given evaluation result variable. * * @param args evaluation arguments. 
* @param evaluationResult evaluation result variable */ private EvaluateResponse constructEvaluateResponse(EvaluateArguments args, BVariable evaluationResult) { EvaluateResponse response = new EvaluateResponse(); if (evaluationResult == null) { return response; } else if (evaluationResult instanceof BSimpleVariable) { evaluationResult.getDapVariable().setVariablesReference(0); } else if (evaluationResult instanceof BCompoundVariable) { int variableReference = nextVarReference.getAndIncrement(); evaluationResult.getDapVariable().setVariablesReference(variableReference); loadedCompoundVariables.put(variableReference, (BCompoundVariable) evaluationResult); updateVariableToStackFrameMap(args.getFrameId(), variableReference); } Variable dapVariable = evaluationResult.getDapVariable(); response.setResult(dapVariable.getValue()); response.setType(dapVariable.getType()); response.setIndexedVariables(dapVariable.getIndexedVariables()); response.setNamedVariables(dapVariable.getNamedVariables()); response.setVariablesReference(dapVariable.getVariablesReference()); return response; } /** * Clears state information. */ private void clearState() { suspendedContext = null; activeThread = null; stackFrames.clear(); loadedCompoundVariables.clear(); variableToStackFrames.clear(); scopeIdToFrameIds.clear(); threadStackTraces.clear(); nextVarReference.set(1); } }
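Editor's note on the record above: the variable plumbing (computeVariableAsync, loadedCompoundVariables, nextVarReference) follows the DAP lazy-expansion contract, where a compound value gets a non-zero variablesReference that the client echoes back later to fetch children, and the whole registry is wiped on every resume or step. A minimal self-contained sketch of that bookkeeping follows; the class and method names here are invented for illustration and are not part of the adapter.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;

// Simplified stand-in for the adapter's compound-variable registry (illustrative only).
class VariableRegistry {
    private final AtomicInteger nextRef = new AtomicInteger(1);
    private final Map<Integer, Object> compoundVars = new ConcurrentHashMap<>();

    // Returns 0 for leaf values (DAP: variablesReference == 0 means "not expandable"),
    // or a fresh reference ID that the client can echo back in a variables request.
    int register(Object value, boolean isCompound) {
        if (!isCompound) {
            return 0;
        }
        int ref = nextRef.getAndIncrement();
        compoundVars.put(ref, value);
        return ref;
    }

    // Looks up the parent value when the client asks for the children of a reference.
    Object resolve(int variablesReference) {
        return compoundVars.get(variablesReference);
    }

    // Called on every resume/step, mirroring clearState() in the record above.
    void clear() {
        compoundVars.clear();
        nextRef.set(1);
    }
}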
class JBallerinaDebugServer implements IDebugProtocolServer { private IDebugProtocolClient client; private ClientConfigHolder clientConfigHolder; private DebugExecutionManager executionManager; private JDIEventProcessor eventProcessor; private final ExecutionContext context; private ThreadReferenceProxyImpl activeThread; private SuspendedContext suspendedContext; private DebugOutputLogger outputLogger; private final AtomicInteger nextVarReference = new AtomicInteger(); private final Map<Integer, StackFrameProxyImpl> stackFrames = new HashMap<>(); private final Map<Long, StackFrame[]> threadStackTraces = new HashMap<>(); private final Map<Integer, Integer> scopeIdToFrameIds = new HashMap<>(); private final Map<Integer, Integer> variableToStackFrames = new ConcurrentHashMap<>(); private final Map<Integer, BCompoundVariable> loadedCompoundVariables = new ConcurrentHashMap<>(); private final ExecutorService variableExecutor = Executors.newCachedThreadPool(); private static final Logger LOGGER = LoggerFactory.getLogger(JBallerinaDebugServer.class); private static final String SCOPE_NAME_LOCAL = "Local"; private static final String SCOPE_NAME_GLOBAL = "Global"; private static final String VALUE_UNKNOWN = "unknown"; private static final String EVAL_ARGS_CONTEXT_VARIABLES = "variables"; private static final String COMPILATION_ERROR_MESSAGE = "error: compilation contains errors"; public JBallerinaDebugServer() { context = new ExecutionContext(this); } public ExecutionContext getContext() { return context; } ClientConfigHolder getClientConfigHolder() { return clientConfigHolder; } public DebugOutputLogger getOutputLogger() { return outputLogger; } @Override public CompletableFuture<Capabilities> initialize(InitializeRequestArguments args) { Capabilities capabilities = new Capabilities(); capabilities.setSupportsConfigurationDoneRequest(true); capabilities.setSupportsTerminateRequest(true); capabilities.setSupportTerminateDebuggee(true); capabilities.setSupportsConditionalBreakpoints(true); capabilities.setSupportsLogPoints(true); capabilities.setSupportsCompletionsRequest(true); capabilities.setSupportsRestartRequest(false); capabilities.setSupportsHitConditionalBreakpoints(false); capabilities.setSupportsModulesRequest(false); capabilities.setSupportsStepBack(false); capabilities.setSupportsTerminateThreadsRequest(false); capabilities.setSupportsFunctionBreakpoints(false); eventProcessor = new JDIEventProcessor(context); outputLogger = new DebugOutputLogger(client); context.setClient(client); client.initialized(); return CompletableFuture.completedFuture(capabilities); } @Override public CompletableFuture<SetBreakpointsResponse> setBreakpoints(SetBreakpointsArguments args) { BalBreakpoint[] balBreakpoints = Arrays.stream(args.getBreakpoints()) .map((SourceBreakpoint sourceBreakpoint) -> toBalBreakpoint(sourceBreakpoint, args.getSource())) .toArray(BalBreakpoint[]::new); Breakpoint[] breakpoints = Arrays.stream(balBreakpoints) .map(BalBreakpoint::getAsDAPBreakpoint) .toArray(Breakpoint[]::new); Map<Integer, BalBreakpoint> breakpointsMap = new HashMap<>(); for (BalBreakpoint bp : balBreakpoints) { breakpointsMap.put(bp.getLine(), bp); } SetBreakpointsResponse breakpointsResponse = new SetBreakpointsResponse(); breakpointsResponse.setBreakpoints(breakpoints); String sourcePath = args.getSource().getPath(); eventProcessor.setBreakpoints(sourcePath, breakpointsMap); return CompletableFuture.completedFuture(breakpointsResponse); } @Override public
CompletableFuture<Void> configurationDone(ConfigurationDoneArguments args) { return CompletableFuture.completedFuture(null); } @Override public CompletableFuture<Void> launch(Map<String, Object> args) { try { clearState(); context.setDebugMode(ExecutionContext.DebugMode.LAUNCH); clientConfigHolder = new ClientLaunchConfigHolder(args); context.setSourceProject(loadProject(clientConfigHolder.getSourcePath())); String sourceProjectRoot = context.getSourceProjectRoot(); BProgramRunner programRunner = context.getSourceProject() instanceof SingleFileProject ? new BSingleFileRunner((ClientLaunchConfigHolder) clientConfigHolder, sourceProjectRoot) : new BPackageRunner((ClientLaunchConfigHolder) clientConfigHolder, sourceProjectRoot); context.setLaunchedProcess(programRunner.start()); startListeningToProgramOutput(); return CompletableFuture.completedFuture(null); } catch (Exception e) { outputLogger.sendErrorOutput("Failed to launch the ballerina program due to: " + e); return CompletableFuture.completedFuture(null); } } @Override public CompletableFuture<Void> attach(Map<String, Object> args) { try { clearState(); context.setDebugMode(ExecutionContext.DebugMode.ATTACH); clientConfigHolder = new ClientAttachConfigHolder(args); context.setSourceProject(loadProject(clientConfigHolder.getSourcePath())); ClientAttachConfigHolder configHolder = (ClientAttachConfigHolder) clientConfigHolder; String hostName = configHolder.getHostName().orElse(""); int portName = configHolder.getDebuggePort(); attachToRemoteVM(hostName, portName); } catch (Exception e) { String host = ((ClientAttachConfigHolder) clientConfigHolder).getHostName().orElse(LOCAL_HOST); String portName; try { portName = Integer.toString(clientConfigHolder.getDebuggePort()); } catch (ClientConfigurationException clientConfigurationException) { portName = VALUE_UNKNOWN; } LOGGER.error(e.getMessage()); outputLogger.sendErrorOutput(String.format("Failed to attach to the target VM, address: '%s:%s'.", host, portName)); } return CompletableFuture.completedFuture(null); } @Override public CompletableFuture<ThreadsResponse> threads() { ThreadsResponse threadsResponse = new ThreadsResponse(); if (eventProcessor == null) { return CompletableFuture.completedFuture(threadsResponse); } Map<Integer, ThreadReferenceProxyImpl> threadsMap = getActiveStrandThreads(); if (threadsMap == null) { return CompletableFuture.completedFuture(threadsResponse); } Thread[] threads = new Thread[threadsMap.size()]; threadsMap.values().stream().map(this::toDapThread).collect(Collectors.toList()).toArray(threads); threadsResponse.setThreads(threads); return CompletableFuture.completedFuture(threadsResponse); } @Override public CompletableFuture<Void> pause(PauseArguments args) { VirtualMachineProxyImpl debuggeeVM = context.getDebuggeeVM(); if (!debuggeeVM.canBeModified()) { getOutputLogger().sendConsoleOutput("Failed to suspend the remote VM due to: pause requests are not " + "supported on read-only VMs"); return CompletableFuture.completedFuture(null); } debuggeeVM.suspend(); eventProcessor.notifyStopEvent(StoppedEventArgumentsReason.PAUSE, args.getThreadId()); return CompletableFuture.completedFuture(null); } @Override public CompletableFuture<StackTraceResponse> stackTrace(StackTraceArguments args) { StackTraceResponse stackTraceResponse = new StackTraceResponse(); try { activeThread = getAllThreads().get(args.getThreadId()); if (threadStackTraces.containsKey(activeThread.uniqueID())) { stackTraceResponse.setStackFrames(threadStackTraces.get(activeThread.uniqueID())); } 
else { StackFrame[] validFrames = activeThread.frames().stream() .map(this::toDapStackFrame) .filter(JBallerinaDebugServer::isValidFrame) .toArray(StackFrame[]::new); stackTraceResponse.setStackFrames(validFrames); threadStackTraces.put(activeThread.uniqueID(), validFrames); } return CompletableFuture.completedFuture(stackTraceResponse); } catch (Exception e) { LOGGER.error(e.getMessage(), e); stackTraceResponse.setStackFrames(new StackFrame[0]); return CompletableFuture.completedFuture(stackTraceResponse); } } @Override public CompletableFuture<ScopesResponse> scopes(ScopesArguments args) { Scope localScope = new Scope(); localScope.setName(SCOPE_NAME_LOCAL); scopeIdToFrameIds.put(nextVarReference.get(), args.getFrameId()); localScope.setVariablesReference(nextVarReference.getAndIncrement()); Scope globalScope = new Scope(); globalScope.setName(SCOPE_NAME_GLOBAL); scopeIdToFrameIds.put(nextVarReference.get(), -args.getFrameId()); globalScope.setVariablesReference(nextVarReference.getAndIncrement()); Scope[] scopes = {localScope, globalScope}; ScopesResponse scopesResponse = new ScopesResponse(); scopesResponse.setScopes(scopes); return CompletableFuture.completedFuture(scopesResponse); } @Override public CompletableFuture<SourceResponse> source(SourceArguments args) { return CompletableFuture.completedFuture(null); } @Override public CompletableFuture<ContinueResponse> continue_(ContinueArguments args) { prepareFor(DebugInstruction.CONTINUE); context.getDebuggeeVM().resume(); ContinueResponse continueResponse = new ContinueResponse(); continueResponse.setAllThreadsContinued(true); return CompletableFuture.completedFuture(continueResponse); } @Override public CompletableFuture<Void> next(NextArguments args) { prepareFor(DebugInstruction.STEP_OVER); eventProcessor.sendStepRequest(args.getThreadId(), StepRequest.STEP_OVER); return CompletableFuture.completedFuture(null); } @Override public CompletableFuture<Void> stepIn(StepInArguments args) { prepareFor(DebugInstruction.STEP_IN); eventProcessor.sendStepRequest(args.getThreadId(), StepRequest.STEP_INTO); return CompletableFuture.completedFuture(null); } @Override public CompletableFuture<Void> stepOut(StepOutArguments args) { stepOut(args.getThreadId()); return CompletableFuture.completedFuture(null); } void stepOut(int threadId) { prepareFor(DebugInstruction.STEP_OUT); eventProcessor.sendStepRequest(threadId, StepRequest.STEP_OUT); } @Override public CompletableFuture<Void> setExceptionBreakpoints(SetExceptionBreakpointsArguments args) { return CompletableFuture.completedFuture(null); } @Override public CompletableFuture<EvaluateResponse> evaluate(EvaluateArguments args) { if (executionManager == null || !executionManager.isActive()) { context.getOutputLogger().sendErrorOutput(EvaluationExceptionKind.PREFIX + "Debug server is not " + "connected to any program VM."); return CompletableFuture.completedFuture(new EvaluateResponse()); } if (args.getFrameId() == null) { context.getOutputLogger().sendErrorOutput(EvaluationExceptionKind.PREFIX + "Remote VM is not suspended " + "and still in running state."); return CompletableFuture.completedFuture(new EvaluateResponse()); } if (args.getContext() != null && args.getContext().equals(EVAL_ARGS_CONTEXT_VARIABLES)) { EvaluateResponse response = new EvaluateResponse(); response.setResult(args.getExpression()); return CompletableFuture.completedFuture(response); } return CompletableFuture.supplyAsync(() -> { try { StackFrameProxyImpl frame = stackFrames.get(args.getFrameId()); SuspendedContext
suspendedContext = new SuspendedContext(context, activeThread, frame); EvaluationContext evaluationContext = new EvaluationContext(suspendedContext); DebugExpressionEvaluator evaluator = new DebugExpressionEvaluator(evaluationContext); evaluator.setExpression(args.getExpression()); BVariable evaluationResult = evaluator.evaluate().getBVariable(); return constructEvaluateResponse(args, evaluationResult); } catch (EvaluationException e) { context.getOutputLogger().sendErrorOutput(e.getMessage()); return new EvaluateResponse(); } catch (Exception e) { context.getOutputLogger().sendErrorOutput(EvaluationExceptionKind.PREFIX + "internal error"); return new EvaluateResponse(); } }); } @Override public CompletableFuture<CompletionsResponse> completions(CompletionsArguments args) { return CompletableFuture.supplyAsync(() -> { CompletionsResponse completionsResponse = new CompletionsResponse(); if (suspendedContext == null) { return completionsResponse; } CompletionContext completionContext = new CompletionContext(suspendedContext); if (!triggerCharactersFound(args.getText())) { CompletionItem[] visibleSymbolCompletions = getVisibleSymbolCompletions(completionContext); completionsResponse.setTargets(visibleSymbolCompletions); return completionsResponse; } try { NonTerminalNode injectedExpressionNode = getInjectedExpressionNode(completionContext, args, clientConfigHolder.getSourcePath(), suspendedContext.getLineNumber()); Optional<Node> resolverNode = getResolverNode(injectedExpressionNode); if (resolverNode.isPresent() && resolverNode.get().kind() == SyntaxKind.FIELD_ACCESS) { FieldAccessCompletionResolver fieldAccessCompletionResolver = new FieldAccessCompletionResolver(completionContext); List<Symbol> visibleEntries = fieldAccessCompletionResolver .getVisibleEntries(((FieldAccessExpressionNode) resolverNode.get()).expression()); CompletionItem[] completions = getCompletions(visibleEntries); completionsResponse.setTargets(completions); } } catch (Exception e) { LOGGER.error(e.getMessage()); } return completionsResponse; }); } @Override public CompletableFuture<SetFunctionBreakpointsResponse> setFunctionBreakpoints( SetFunctionBreakpointsArguments args) { return CompletableFuture.completedFuture(null); } @Override public CompletableFuture<Void> disconnect(DisconnectArguments args) { context.setTerminateRequestReceived(true); boolean terminateDebuggee = Objects.requireNonNullElse(args.getTerminateDebuggee(), true); terminateServer(terminateDebuggee); return CompletableFuture.completedFuture(null); } @Override public CompletableFuture<Void> terminate(TerminateArguments args) { context.setTerminateRequestReceived(true); terminateServer(true); return CompletableFuture.completedFuture(null); } void terminateServer(boolean terminateDebuggee) { if (context.getLaunchedProcess().isPresent() && context.getLaunchedProcess().get().isAlive()) { killProcessWithDescendants(context.getLaunchedProcess().get()); } if (terminateDebuggee && context.getDebuggeeVM() != null) { int exitCode = 0; if (context.getDebuggeeVM().process() != null) { exitCode = killProcessWithDescendants(context.getDebuggeeVM().process()); } try { context.getDebuggeeVM().exit(exitCode); } catch (Exception ignored) { } } if (!context.isTerminateRequestReceived()) { ExitedEventArguments exitedEventArguments = new ExitedEventArguments(); exitedEventArguments.setExitCode(0); context.getClient().exited(exitedEventArguments); } if (executionManager != null) { String address = (executionManager.getHost().isPresent() && 
executionManager.getPort().isPresent()) ? executionManager.getHost().get() + ":" + executionManager.getPort().get() : VALUE_UNKNOWN; outputLogger.sendDebugServerOutput(String.format(System.lineSeparator() + "Disconnected from the target " + "VM, address: '%s'", address)); } new java.lang.Thread(() -> { try { java.lang.Thread.sleep(500); } catch (InterruptedException ignored) { } System.exit(0); }).start(); } private static int killProcessWithDescendants(Process parent) { try { parent.descendants().forEach(processHandle -> { boolean successful = processHandle.destroy(); if (!successful) { processHandle.destroyForcibly(); } }); parent.destroyForcibly(); parent.waitFor(); return parent.exitValue(); } catch (InterruptedException ignored) { return 0; } catch (Exception e) { return 1; } } public void connect(IDebugProtocolClient client) { this.client = client; } private synchronized void updateVariableToStackFrameMap(int parent, int child) { if (!variableToStackFrames.containsKey(parent)) { variableToStackFrames.put(child, parent); return; } Integer parentRef; do { parentRef = variableToStackFrames.get(parent); } while (variableToStackFrames.containsKey(parentRef)); variableToStackFrames.put(child, parentRef); } /** * Converts a JDI thread reference to a DAP thread instance. * * @param threadReference JDI thread reference * @return the corresponding DAP thread instance */ Thread toDapThread(ThreadReferenceProxyImpl threadReference) { Thread thread = new Thread(); thread.setId((int) threadReference.uniqueID()); thread.setName(threadReference.name()); return thread; } /** * Converts a JDI stack frame instance to a DAP stack frame instance. */ private StackFrame toDapStackFrame(StackFrameProxyImpl stackFrameProxy) { try { if (!isBalStackFrame(stackFrameProxy.getStackFrame())) { return null; } int referenceId = nextVarReference.getAndIncrement(); stackFrames.put(referenceId, stackFrameProxy); BallerinaStackFrame balStackFrame = new BallerinaStackFrame(context, referenceId, stackFrameProxy); return balStackFrame.getAsDAPStackFrame().orElse(null); } catch (JdiProxyException e) { return null; } } private BalBreakpoint toBalBreakpoint(SourceBreakpoint sourceBreakpoint, Source source) { BalBreakpoint breakpoint = new BalBreakpoint(source, sourceBreakpoint.getLine()); breakpoint.setCondition(sourceBreakpoint.getCondition()); breakpoint.setLogMessage(sourceBreakpoint.getLogMessage()); return breakpoint; } /** * Returns a map of all currently running threads in the remote VM, against their unique ID. * <p> * Thread objects that have not yet been started (see {@link java.lang.Thread#start()}) * and thread objects that have completed their execution are not included in the returned list. */ Map<Integer, ThreadReferenceProxyImpl> getAllThreads() { if (context.getDebuggeeVM() == null) { return null; } Collection<ThreadReference> threadReferences = context.getDebuggeeVM().getVirtualMachine().allThreads(); Map<Integer, ThreadReferenceProxyImpl> threadsMap = new HashMap<>(); for (ThreadReference threadReference : threadReferences) { threadsMap.put((int) threadReference.uniqueID(), new ThreadReferenceProxyImpl(context.getDebuggeeVM(), threadReference)); } return threadsMap; } /** * Returns a map of thread instances which correspond to an active ballerina strand, against their unique ID.
*/ Map<Integer, ThreadReferenceProxyImpl> getActiveStrandThreads() { Map<Integer, ThreadReferenceProxyImpl> allThreads = getAllThreads(); if (allThreads == null) { return null; } Map<Integer, ThreadReferenceProxyImpl> balStrandThreads = new HashMap<>(); allThreads.forEach((id, threadProxy) -> { ThreadReference threadReference = threadProxy.getThreadReference(); if (threadReference.status() == ThreadReference.THREAD_STATUS_RUNNING && !threadReference.name().equals("Reference Handler") && !threadReference.name().equals("Signal Dispatcher") && threadReference.isSuspended() && isBalStrand(threadReference) ) { balStrandThreads.put(id, new ThreadReferenceProxyImpl(context.getDebuggeeVM(), threadReference)); } }); return balStrandThreads; } /** * Validates whether the given DAP thread reference represents a ballerina strand. * <p> * * @param threadReference DAP thread reference * @return true if the given DAP thread reference represents a ballerina strand. */ private static boolean isBalStrand(ThreadReference threadReference) { try { return isBalStackFrame(threadReference.frames().get(0)); } catch (Exception e) { return false; } } /** * Validates whether the given DAP stack frame represents a ballerina call stack frame. * * @param frame DAP stack frame * @return true if the given DAP stack frame represents a ballerina call stack frame. */ static boolean isBalStackFrame(com.sun.jdi.StackFrame frame) { try { return frame.location().sourceName().endsWith(BAL_FILE_EXT); } catch (Exception e) { return false; } } /** * Validates a given ballerina stack frame for its source information. * * @param stackFrame ballerina stack frame * @return true if it's a valid ballerina frame */ static boolean isValidFrame(StackFrame stackFrame) { return stackFrame != null && stackFrame.getSource() != null && stackFrame.getLine() > 0; } /** * Asynchronously listens to remote debuggee stdout + error streams and redirects the output to the client debug * console. */ private void startListeningToProgramOutput() { CompletableFuture.runAsync(() -> { if (context.getLaunchedProcess().isEmpty()) { return; } try (BufferedReader errorStream = context.getErrorStream()) { String line; while ((line = errorStream.readLine()) != null) { outputLogger.sendConsoleOutput(line); if (context.getDebuggeeVM() == null && line.contains(COMPILATION_ERROR_MESSAGE)) { terminateServer(false); } } } catch (IOException ignored) { } }); CompletableFuture.runAsync(() -> { if (context.getLaunchedProcess().isEmpty()) { return; } try (BufferedReader inputStream = context.getInputStream()) { String line; outputLogger.sendDebugServerOutput("Waiting for debug process to start..." + System.lineSeparator() + System.lineSeparator()); while ((line = inputStream.readLine()) != null) { if (line.contains("Listening for transport dt_socket")) { attachToRemoteVM("", clientConfigHolder.getDebuggePort()); } else if (context.getDebuggeeVM() == null && line.contains(COMPILATION_ERROR_MESSAGE)) { terminateServer(false); } outputLogger.sendProgramOutput(line); } } catch (Exception e) { String host = clientConfigHolder instanceof ClientAttachConfigHolder ?
((ClientAttachConfigHolder) clientConfigHolder).getHostName().orElse(LOCAL_HOST) : LOCAL_HOST; String portName; try { portName = Integer.toString(clientConfigHolder.getDebuggePort()); } catch (ClientConfigurationException clientConfigurationException) { portName = VALUE_UNKNOWN; } LOGGER.error(e.getMessage()); outputLogger.sendDebugServerOutput(String.format("Failed to attach to the target VM, address: '%s:%s'.", host, portName)); terminateServer(context.getDebuggeeVM() != null); } }); } /** * Attach to the remote VM using host address and port. * * @param hostName host address * @param portName host port */ private void attachToRemoteVM(String hostName, int portName) throws IOException, IllegalConnectorArgumentsException { executionManager = new DebugExecutionManager(this); VirtualMachine attachedVm = executionManager.attach(hostName, portName); context.setDebuggeeVM(new VirtualMachineProxyImpl(attachedVm)); EventRequestManager erm = context.getEventManager(); ClassPrepareRequest classPrepareRequest = erm.createClassPrepareRequest(); classPrepareRequest.enable(); eventProcessor.startListening(); } /** * Clears previous state information and prepares for the given debug instruction type execution. */ private void prepareFor(DebugInstruction instruction) { clearState(); eventProcessor.restoreBreakpoints(instruction); context.setLastInstruction(instruction); } private Variable[] computeGlobalScopeVariables(VariablesArguments requestArgs) { int stackFrameReference = requestArgs.getVariablesReference(); String classQName = PackageUtils.getQualifiedClassName(suspendedContext, INIT_CLASS_NAME); List<ReferenceType> cls = suspendedContext.getAttachedVm().classesByName(classQName); if (cls.size() != 1) { return new Variable[0]; } List<CompletableFuture<Variable>> scheduledVariables = new ArrayList<>(); ReferenceType initClassReference = cls.get(0); for (Field field : initClassReference.allFields()) { String fieldName = Utils.decodeIdentifier(field.name()); if (!field.isPublic() || !field.isStatic() || fieldName.startsWith(GENERATED_VAR_PREFIX)) { continue; } Value fieldValue = initClassReference.getValue(field); scheduledVariables.add(computeVariableAsync(fieldName, fieldValue, stackFrameReference)); } return scheduledVariables.stream() .map(varFuture -> { try { return varFuture.get(); } catch (Exception ignored) { LOGGER.error("Failed to load some debug variables due to runtime exceptions."); return null; } }) .filter(Objects::nonNull) .toArray(Variable[]::new); } private Variable[] computeLocalScopeVariables(VariablesArguments args) throws Exception { StackFrameProxyImpl stackFrame = suspendedContext.getFrame(); List<CompletableFuture<Variable>> scheduledVariables = new ArrayList<>(); List<CompletableFuture<Variable[]>> scheduledLambdaMapVariables = new ArrayList<>(); List<LocalVariableProxyImpl> localVariableProxies = stackFrame.visibleVariables(); for (LocalVariableProxyImpl var : localVariableProxies) { String name = var.name(); Value value = stackFrame.getValue(var); if (VariableUtils.isLambdaParamMap(var)) { scheduledLambdaMapVariables.add(fetchLocalVariablesFromMap(args, stackFrame, var)); } else { CompletableFuture<Variable> dapVar = computeVariableAsync(name, value, args.getVariablesReference()); scheduledVariables.add(dapVar); } } List<Variable> resolvedVariables = new ArrayList<>(); scheduledVariables.forEach(varFuture -> { try { Variable variable = varFuture.get(); if (variable != null) { resolvedVariables.add(variable); } } catch (InterruptedException | ExecutionException e) { 
LOGGER.error("Failed to load some debug variables due to runtime exceptions."); } }); scheduledLambdaMapVariables.forEach(varFuture -> { try { Variable[] variables = varFuture.get(); if (variables != null) { resolvedVariables.addAll(Arrays.asList(variables)); } } catch (InterruptedException | ExecutionException e) { LOGGER.error("Failed to load some debug variables due to runtime exceptions."); } }); return resolvedVariables.toArray(new Variable[0]); } /** * Returns the list of local variables extracted from the given variable map, which contains local variables used * within lambda functions. * * @param args variable args * @param stackFrame parent stack frame instance * @param lambdaParamMapVar map variable instance * @return list of local variables extracted from the given variable map */ private CompletableFuture<Variable[]> fetchLocalVariablesFromMap(VariablesArguments args, StackFrameProxyImpl stackFrame, LocalVariableProxyImpl lambdaParamMapVar) { try { Value value = stackFrame.getValue(lambdaParamMapVar); CompletableFuture<Variable> scheduledVariable = computeVariableAsync("lambdaArgMap", value, args.getVariablesReference()); Variable dapVariable = scheduledVariable.get(); if (dapVariable == null || !dapVariable.getType().equals(BVariableType.MAP.getString())) { return new CompletableFuture<>(); } VariablesArguments childVarRequestArgs = new VariablesArguments(); childVarRequestArgs.setVariablesReference(dapVariable.getVariablesReference()); return computeChildVariablesAsync(childVarRequestArgs); } catch (Exception e) { return new CompletableFuture<>(); } } /** * Asynchronously computes a debugger adapter protocol supported variable instance from Coverts a given ballerina * runtime value instance. * * @param name variable name * @param value runtime value of the variable * @param stackFrameRef reference ID of the parent stack frame */ private CompletableFuture<Variable> computeVariableAsync(String name, Value value, Integer stackFrameRef) { return CompletableFuture.supplyAsync(() -> { BVariable variable = VariableFactory.getVariable(suspendedContext, name, value); if (variable == null) { return null; } if (variable instanceof BSimpleVariable) { variable.getDapVariable().setVariablesReference(0); } else if (variable instanceof BCompoundVariable) { int variableReference = nextVarReference.getAndIncrement(); variable.getDapVariable().setVariablesReference(variableReference); loadedCompoundVariables.put(variableReference, (BCompoundVariable) variable); updateVariableToStackFrameMap(stackFrameRef, variableReference); } return variable.getDapVariable(); }, variableExecutor); } private CompletableFuture<Variable[]> computeChildVariablesAsync(VariablesArguments args) { return CompletableFuture.supplyAsync(() -> computeChildVariables(args), variableExecutor); } private Variable[] computeChildVariables(VariablesArguments args) { BCompoundVariable parentVar = loadedCompoundVariables.get(args.getVariablesReference()); Integer stackFrameId = variableToStackFrames.get(args.getVariablesReference()); if (stackFrameId == null) { return new Variable[0]; } if (parentVar instanceof IndexedCompoundVariable) { int startIndex = (args.getStart() != null) ? args.getStart() : 0; int count = (args.getCount() != null) ? 
args.getCount() : 0; Either<Map<String, Value>, List<Value>> childVars = ((IndexedCompoundVariable) parentVar) .getIndexedChildVariables(startIndex, count); if (childVars.isLeft()) { return createVariableArrayFrom(args, childVars.getLeft()); } else if (childVars.isRight()) { return createVariableArrayFrom(args, childVars.getRight()); } return new Variable[0]; } else if (parentVar instanceof NamedCompoundVariable) { Map<String, Value> childVars = ((NamedCompoundVariable) parentVar).getNamedChildVariables(); return createVariableArrayFrom(args, childVars); } return new Variable[0]; } private Variable[] createVariableArrayFrom(VariablesArguments args, Map<String, Value> varMap) { return varMap.entrySet().stream().map(entry -> { String name = entry.getKey(); Value value = entry.getValue(); BVariable variable = VariableFactory.getVariable(suspendedContext, name, value); if (variable == null) { return null; } else if (variable instanceof BSimpleVariable) { variable.getDapVariable().setVariablesReference(0); } else if (variable instanceof BCompoundVariable) { int variableReference = nextVarReference.getAndIncrement(); variable.getDapVariable().setVariablesReference(variableReference); loadedCompoundVariables.put(variableReference, (BCompoundVariable) variable); updateVariableToStackFrameMap(args.getVariablesReference(), variableReference); } return variable.getDapVariable(); }).filter(Objects::nonNull).toArray(Variable[]::new); } private Variable[] createVariableArrayFrom(VariablesArguments args, List<Value> varMap) { int startIndex = (args.getStart() != null) ? args.getStart() : 0; AtomicInteger index = new AtomicInteger(startIndex); return varMap.stream().map(value -> { String name = String.format("[%d]", index.getAndIncrement()); BVariable variable = VariableFactory.getVariable(suspendedContext, name, value); if (variable == null) { return null; } else if (variable instanceof BSimpleVariable) { variable.getDapVariable().setVariablesReference(0); } else if (variable instanceof BCompoundVariable) { int variableReference = nextVarReference.getAndIncrement(); variable.getDapVariable().setVariablesReference(variableReference); loadedCompoundVariables.put(variableReference, (BCompoundVariable) variable); updateVariableToStackFrameMap(args.getVariablesReference(), variableReference); } return variable.getDapVariable(); }).filter(Objects::nonNull).toArray(Variable[]::new); } /** * Creates a {@link EvaluateResponse} from the given evaluation result variable. * * @param args evaluation arguments. 
* @param evaluationResult evaluation result variable */ private EvaluateResponse constructEvaluateResponse(EvaluateArguments args, BVariable evaluationResult) { EvaluateResponse response = new EvaluateResponse(); if (evaluationResult == null) { return response; } else if (evaluationResult instanceof BSimpleVariable) { evaluationResult.getDapVariable().setVariablesReference(0); } else if (evaluationResult instanceof BCompoundVariable) { int variableReference = nextVarReference.getAndIncrement(); evaluationResult.getDapVariable().setVariablesReference(variableReference); loadedCompoundVariables.put(variableReference, (BCompoundVariable) evaluationResult); updateVariableToStackFrameMap(args.getFrameId(), variableReference); } Variable dapVariable = evaluationResult.getDapVariable(); response.setResult(dapVariable.getValue()); response.setType(dapVariable.getType()); response.setIndexedVariables(dapVariable.getIndexedVariables()); response.setNamedVariables(dapVariable.getNamedVariables()); response.setVariablesReference(dapVariable.getVariablesReference()); return response; } /** * Clears state information. */ private void clearState() { suspendedContext = null; activeThread = null; stackFrames.clear(); loadedCompoundVariables.clear(); variableToStackFrames.clear(); scopeIdToFrameIds.clear(); threadStackTraces.clear(); nextVarReference.set(1); } }
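A second editorial aside on this record: terminateServer delegates to killProcessWithDescendants, which walks the Java 9+ ProcessHandle tree so the launched program dies together with its children. A stripped-down, runnable sketch of the same pattern; the class name is invented for illustration.

// Illustrative helper: kills a process together with all of its descendants (Java 9+).
public final class ProcessTreeKiller {
    public static int killProcessTree(Process parent) throws InterruptedException {
        parent.descendants().forEach(handle -> {
            if (!handle.destroy()) {      // ask politely first
                handle.destroyForcibly(); // then force termination
            }
        });
        parent.destroyForcibly();
        parent.waitFor();                 // block until the parent actually exits
        return parent.exitValue();
    }
}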
Would rather keep the comment and delete the commented code.
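(Editor's gloss on the comment above, since the commented-out code it refers to is not shown in this record: the reviewer wants explanatory prose comments kept while dead, commented-out statements are deleted. A hypothetical before/after illustration; every identifier here is invented and none of it is taken from the actual patch.)

// Hypothetical illustration of the review request.
class ReviewExample {
    // Before: explanatory comment plus a dead, commented-out statement.
    long before() {
        // Compute the watershed transaction id before adding the shadow index.
        // long txnId = legacyNextTransactionId();  <- commented-out code: delete this line
        return nextTransactionId();
    }

    // After: the explanatory comment stays, the dead statement goes.
    long after() {
        // Compute the watershed transaction id before adding the shadow index.
        return nextTransactionId();
    }

    long nextTransactionId() {
        return 42L; // stand-in value for the illustration
    }
}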
protected void runPendingJob() throws AlterCancelException { long numTablets = 0; AgentBatchTask batchTask = new AgentBatchTask(); MarkedCountDownLatch<Long, Long> countDownLatch; try (ReadLockedDatabase db = getReadLockedDatabase(dbId)) { LakeTable table = getTableOrThrow(db, tableId); Preconditions.checkState(table.getState() == OlapTable.OlapTableState.SCHEMA_CHANGE); MaterializedIndexMeta indexMeta = table.getIndexMetaByIndexId(table.getBaseIndexId()); numTablets = partitionIndexMap.values().stream().map(MaterializedIndex::getTablets).mapToLong(List::size).sum(); countDownLatch = new MarkedCountDownLatch<>((int) numTablets); for (long partitionId : partitionIndexMap.rowKeySet()) { Partition partition = table.getPartition(partitionId); Preconditions.checkState(partition != null); TStorageMedium storageMedium = table.getPartitionInfo().getDataProperty(partitionId).getStorageMedium(); Map<Long, MaterializedIndex> shadowIndexMap = partitionIndexMap.row(partitionId); for (Map.Entry<Long, MaterializedIndex> entry : shadowIndexMap.entrySet()) { long shadowIdxId = entry.getKey(); MaterializedIndex shadowIdx = entry.getValue(); short shadowShortKeyColumnCount = indexShortKeyMap.get(shadowIdxId); List<Column> shadowSchema = indexSchemaMap.get(shadowIdxId); long originIndexId = indexIdMap.get(shadowIdxId); KeysType originKeysType = table.getKeysTypeByIndexId(originIndexId); List<Column> originSchema = table.getSchemaByIndexId(originIndexId); List<Column> copiedShadowSchema = Lists.newArrayList(); for (Column column : shadowSchema) { Column.DefaultValueType defaultValueType = column.getDefaultValueType(); if (defaultValueType == Column.DefaultValueType.CONST) { Column copiedColumn = new Column(column); copiedColumn.setDefaultValue(column.calculatedDefaultValueWithTime(startTime)); copiedShadowSchema.add(copiedColumn); } else { copiedShadowSchema.add(column); } } List<Integer> copiedSortKeyIdxes = indexMeta.getSortKeyIdxes(); if (indexMeta.getSortKeyIdxes() != null) { if (originSchema.size() > shadowSchema.size()) { List<Column> differences = originSchema.stream().filter(element -> !shadowSchema.contains(element)).collect(Collectors.toList()); Integer dropIdx = new Integer(originSchema.indexOf(differences.get(0))); for (int i = 0; i < copiedSortKeyIdxes.size(); ++i) { Integer sortKeyIdx = copiedSortKeyIdxes.get(i); if (dropIdx < sortKeyIdx) { copiedSortKeyIdxes.set(i, sortKeyIdx - 1); } } } else if (originSchema.size() < shadowSchema.size()) { List<Column> differences = shadowSchema.stream().filter(element -> !originSchema.contains(element)).collect(Collectors.toList()); for (Column difference : differences) { int addColumnIdx = shadowSchema.indexOf(difference); for (int i = 0; i < copiedSortKeyIdxes.size(); ++i) { Integer sortKeyIdx = copiedSortKeyIdxes.get(i); int shadowSortKeyIdx = shadowSchema.indexOf(originSchema.get( indexMeta.getSortKeyIdxes().get(i))); if (addColumnIdx < shadowSortKeyIdx) { copiedSortKeyIdxes.set(i, sortKeyIdx + 1); } } } } } if (sortKeyIdxes != null) { copiedSortKeyIdxes = sortKeyIdxes; } else if (copiedSortKeyIdxes != null && !copiedSortKeyIdxes.isEmpty()) { sortKeyIdxes = copiedSortKeyIdxes; } for (Tablet shadowTablet : shadowIdx.getTablets()) { long shadowTabletId = shadowTablet.getId(); LakeTablet lakeTablet = ((LakeTablet) shadowTablet); Long backendId = Utils.chooseBackend(lakeTablet); if (backendId == null) { throw new AlterCancelException("No alive backend"); } countDownLatch.addMark(backendId, shadowTabletId); CreateReplicaTask createReplicaTask = new 
CreateReplicaTask(backendId, dbId, tableId, partitionId, shadowIdxId, shadowTabletId, shadowShortKeyColumnCount, 0, Partition.PARTITION_INIT_VERSION, originKeysType, TStorageType.COLUMN, storageMedium, copiedShadowSchema, bfColumns, bfFpp, countDownLatch, indexes, table.isInMemory(), table.enablePersistentIndex(), table.primaryIndexCacheExpireSec(), TTabletType.TABLET_TYPE_LAKE, table.getCompressionType(), copiedSortKeyIdxes); batchTask.addTask(createReplicaTask); } } } } catch (Exception e) { throw new AlterCancelException(e.getMessage()); } sendAgentTaskAndWait(batchTask, countDownLatch, Config.tablet_create_timeout_second * numTablets); try (WriteLockedDatabase db = getWriteLockedDatabase(dbId)) { LakeTable table = getTableOrThrow(db, tableId); Preconditions.checkState(table.getState() == OlapTable.OlapTableState.SCHEMA_CHANGE); watershedTxnId = getNextTransactionId(); addShadowIndexToCatalog(table); } jobState = JobState.WAITING_TXN; if (span != null) { span.setAttribute("watershedTxnId", this.watershedTxnId); span.addEvent("setWaitingTxn"); } writeEditLog(this); LOG.info("transfer schema change job {} state to {}, watershed txn_id: {}", jobId, this.jobState, watershedTxnId); }
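Editor's note: the subtlest part of runPendingJob above is re-basing the sort-key column indexes when the shadow schema drops or adds columns relative to the origin schema; any sort key pointing past a dropped column must shift left by one. A stripped-down sketch of the drop case under that reading; the helper name is invented and this is not the job's actual code.

import java.util.ArrayList;
import java.util.List;

// Illustrative helper mirroring the "dropIdx < sortKeyIdx" adjustment in the job above.
public final class SortKeyRebase {
    // Shifts sort-key column indexes left when the column at dropIdx is removed from the schema.
    public static List<Integer> rebaseAfterDrop(List<Integer> sortKeyIdxes, int dropIdx) {
        List<Integer> rebased = new ArrayList<>(sortKeyIdxes.size());
        for (int idx : sortKeyIdxes) {
            rebased.add(idx > dropIdx ? idx - 1 : idx); // only columns after the dropped one move left
        }
        return rebased;
    }

    public static void main(String[] args) {
        // Dropping column 1 turns sort keys [0, 2, 3] into [0, 1, 2].
        System.out.println(rebaseAfterDrop(List.of(0, 2, 3), 1));
    }
}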
protected void runPendingJob() throws AlterCancelException { long numTablets = 0; AgentBatchTask batchTask = new AgentBatchTask(); MarkedCountDownLatch<Long, Long> countDownLatch; try (ReadLockedDatabase db = getReadLockedDatabase(dbId)) { LakeTable table = getTableOrThrow(db, tableId); Preconditions.checkState(table.getState() == OlapTable.OlapTableState.SCHEMA_CHANGE); MaterializedIndexMeta indexMeta = table.getIndexMetaByIndexId(table.getBaseIndexId()); numTablets = partitionIndexMap.values().stream().map(MaterializedIndex::getTablets).mapToLong(List::size).sum(); countDownLatch = new MarkedCountDownLatch<>((int) numTablets); for (long partitionId : partitionIndexMap.rowKeySet()) { Partition partition = table.getPartition(partitionId); Preconditions.checkState(partition != null); TStorageMedium storageMedium = table.getPartitionInfo().getDataProperty(partitionId).getStorageMedium(); Map<Long, MaterializedIndex> shadowIndexMap = partitionIndexMap.row(partitionId); for (Map.Entry<Long, MaterializedIndex> entry : shadowIndexMap.entrySet()) { long shadowIdxId = entry.getKey(); MaterializedIndex shadowIdx = entry.getValue(); short shadowShortKeyColumnCount = indexShortKeyMap.get(shadowIdxId); List<Column> shadowSchema = indexSchemaMap.get(shadowIdxId); long originIndexId = indexIdMap.get(shadowIdxId); KeysType originKeysType = table.getKeysTypeByIndexId(originIndexId); List<Column> originSchema = table.getSchemaByIndexId(originIndexId); List<Column> copiedShadowSchema = Lists.newArrayList(); for (Column column : shadowSchema) { Column.DefaultValueType defaultValueType = column.getDefaultValueType(); if (defaultValueType == Column.DefaultValueType.CONST) { Column copiedColumn = new Column(column); copiedColumn.setDefaultValue(column.calculatedDefaultValueWithTime(startTime)); copiedShadowSchema.add(copiedColumn); } else { copiedShadowSchema.add(column); } } List<Integer> copiedSortKeyIdxes = indexMeta.getSortKeyIdxes(); if (indexMeta.getSortKeyIdxes() != null) { if (originSchema.size() > shadowSchema.size()) { List<Column> differences = originSchema.stream().filter(element -> !shadowSchema.contains(element)).collect(Collectors.toList()); Integer dropIdx = new Integer(originSchema.indexOf(differences.get(0))); for (int i = 0; i < copiedSortKeyIdxes.size(); ++i) { Integer sortKeyIdx = copiedSortKeyIdxes.get(i); if (dropIdx < sortKeyIdx) { copiedSortKeyIdxes.set(i, sortKeyIdx - 1); } } } else if (originSchema.size() < shadowSchema.size()) { List<Column> differences = shadowSchema.stream().filter(element -> !originSchema.contains(element)).collect(Collectors.toList()); for (Column difference : differences) { int addColumnIdx = shadowSchema.indexOf(difference); for (int i = 0; i < copiedSortKeyIdxes.size(); ++i) { Integer sortKeyIdx = copiedSortKeyIdxes.get(i); int shadowSortKeyIdx = shadowSchema.indexOf(originSchema.get( indexMeta.getSortKeyIdxes().get(i))); if (addColumnIdx < shadowSortKeyIdx) { copiedSortKeyIdxes.set(i, sortKeyIdx + 1); } } } } } if (sortKeyIdxes != null) { copiedSortKeyIdxes = sortKeyIdxes; } else if (copiedSortKeyIdxes != null && !copiedSortKeyIdxes.isEmpty()) { sortKeyIdxes = copiedSortKeyIdxes; } for (Tablet shadowTablet : shadowIdx.getTablets()) { long shadowTabletId = shadowTablet.getId(); LakeTablet lakeTablet = ((LakeTablet) shadowTablet); Long backendId = Utils.chooseBackend(lakeTablet); if (backendId == null) { throw new AlterCancelException("No alive backend"); } countDownLatch.addMark(backendId, shadowTabletId); CreateReplicaTask createReplicaTask = new 
CreateReplicaTask(backendId, dbId, tableId, partitionId, shadowIdxId, shadowTabletId, shadowShortKeyColumnCount, 0, Partition.PARTITION_INIT_VERSION, originKeysType, TStorageType.COLUMN, storageMedium, copiedShadowSchema, bfColumns, bfFpp, countDownLatch, indexes, table.isInMemory(), table.enablePersistentIndex(), table.primaryIndexCacheExpireSec(), TTabletType.TABLET_TYPE_LAKE, table.getCompressionType(), copiedSortKeyIdxes); batchTask.addTask(createReplicaTask); } } } } catch (Exception e) { throw new AlterCancelException(e.getMessage()); } sendAgentTaskAndWait(batchTask, countDownLatch, Config.tablet_create_timeout_second * numTablets); try (WriteLockedDatabase db = getWriteLockedDatabase(dbId)) { LakeTable table = getTableOrThrow(db, tableId); Preconditions.checkState(table.getState() == OlapTable.OlapTableState.SCHEMA_CHANGE); watershedTxnId = getNextTransactionId(); addShadowIndexToCatalog(table); } jobState = JobState.WAITING_TXN; if (span != null) { span.setAttribute("watershedTxnId", this.watershedTxnId); span.addEvent("setWaitingTxn"); } writeEditLog(this); LOG.info("transfer schema change job {} state to {}, watershed txn_id: {}", jobId, this.jobState, watershedTxnId); }
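One more editorial aside: sendAgentTaskAndWait (visible in the context below) blocks on a MarkedCountDownLatch so a timeout can report exactly which (backend, tablet) pairs are still outstanding. A simplified approximation with a plain CountDownLatch; the class name and shape are invented, and the real class additionally carries a shared status and allows several marks per backend.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

// Simplified stand-in: tracks pending (backendId -> tabletId) marks and counts down as tasks report back.
class MarkedLatch {
    private final CountDownLatch latch;
    private final Map<Long, Long> pendingMarks = new ConcurrentHashMap<>();

    MarkedLatch(int expectedCount) {
        this.latch = new CountDownLatch(expectedCount);
    }

    void addMark(long backendId, long tabletId) {
        pendingMarks.put(backendId, tabletId);
    }

    void markDone(long backendId) {
        pendingMarks.remove(backendId);
        latch.countDown();
    }

    // Returns the still-unfinished marks if the wait times out, or an empty map on success,
    // so the caller can name the offending (backend, tablet) pairs in its error message.
    Map<Long, Long> awaitOrReport(long timeoutMillis) throws InterruptedException {
        return latch.await(timeoutMillis, TimeUnit.MILLISECONDS) ? Map.of() : Map.copyOf(pendingMarks);
    }
}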
class LakeTableSchemaChangeJob extends AlterJobV2 { private static final Logger LOG = LogManager.getLogger(LakeTableSchemaChangeJob.class); @SerializedName(value = "partitionIndexTabletMap") private Table<Long, Long, Map<Long, Long>> partitionIndexTabletMap = HashBasedTable.create(); @SerializedName(value = "partitionIndexMap") private Table<Long, Long, MaterializedIndex> partitionIndexMap = HashBasedTable.create(); @SerializedName(value = "indexIdMap") private Map<Long, Long> indexIdMap = Maps.newHashMap(); @SerializedName(value = "indexIdToName") private Map<Long, String> indexIdToName = Maps.newHashMap(); @SerializedName(value = "indexSchemaMap") private Map<Long, List<Column>> indexSchemaMap = Maps.newHashMap(); @SerializedName(value = "indexShortKeyMap") private Map<Long, Short> indexShortKeyMap = Maps.newHashMap(); @SerializedName(value = "hasBfChange") private boolean hasBfChange; @SerializedName(value = "bfColumns") private Set<String> bfColumns = null; @SerializedName(value = "bfFpp") private double bfFpp = 0; @SerializedName(value = "indexChange") private boolean indexChange = false; @SerializedName(value = "indexes") private List<Index> indexes = null; @SerializedName(value = "watershedTxnId") protected long watershedTxnId = -1; @SerializedName(value = "startTime") private long startTime; @SerializedName(value = "commitVersionMap") private Map<Long, Long> commitVersionMap; @SerializedName(value = "sortKeyIdxes") private List<Integer> sortKeyIdxes; private AgentBatchTask schemaChangeBatchTask = new AgentBatchTask(); public LakeTableSchemaChangeJob(long jobId, long dbId, long tableId, String tableName, long timeoutMs) { super(jobId, JobType.SCHEMA_CHANGE, dbId, tableId, tableName, timeoutMs); } void setBloomFilterInfo(boolean hasBfChange, Set<String> bfColumns, double bfFpp) { this.hasBfChange = hasBfChange; this.bfColumns = bfColumns; this.bfFpp = bfFpp; } void setAlterIndexInfo(boolean indexChange, List<Index> indexes) { this.indexChange = indexChange; this.indexes = indexes; } void setStartTime(long startTime) { this.startTime = startTime; } void setSortKeyIdxes(List<Integer> sortKeyIdxes) { this.sortKeyIdxes = sortKeyIdxes; } void addTabletIdMap(long partitionId, long shadowIdxId, long shadowTabletId, long originTabletId) { Map<Long, Long> tabletMap = partitionIndexTabletMap.get(partitionId, shadowIdxId); if (tabletMap == null) { tabletMap = Maps.newHashMap(); partitionIndexTabletMap.put(partitionId, shadowIdxId, tabletMap); } tabletMap.put(shadowTabletId, originTabletId); } void addPartitionShadowIndex(long partitionId, long shadowIdxId, MaterializedIndex shadowIdx) { partitionIndexMap.put(partitionId, shadowIdxId, shadowIdx); } void addIndexSchema(long shadowIdxId, long originIdxId, @NotNull String shadowIndexName, short shadowIdxShortKeyCount, @NotNull List<Column> shadowIdxSchema) { indexIdMap.put(shadowIdxId, originIdxId); indexIdToName.put(shadowIdxId, shadowIndexName); indexShortKeyMap.put(shadowIdxId, shadowIdxShortKeyCount); indexSchemaMap.put(shadowIdxId, shadowIdxSchema); } void addShadowIndexToCatalog(@NotNull LakeTable table) { Preconditions.checkState(watershedTxnId != -1); for (long partitionId : partitionIndexMap.rowKeySet()) { Partition partition = table.getPartition(partitionId); Preconditions.checkState(partition != null); Map<Long, MaterializedIndex> shadowIndexMap = partitionIndexMap.row(partitionId); for (MaterializedIndex shadowIndex : shadowIndexMap.values()) { shadowIndex.setVisibleTxnId(watershedTxnId); Preconditions.checkState(shadowIndex.getState() 
== MaterializedIndex.IndexState.SHADOW, shadowIndex.getState()); partition.createRollupIndex(shadowIndex); } } for (long shadowIdxId : indexIdMap.keySet()) { table.setIndexMeta(shadowIdxId, indexIdToName.get(shadowIdxId), indexSchemaMap.get(shadowIdxId), 0, 0, indexShortKeyMap.get(shadowIdxId), TStorageType.COLUMN, table.getKeysTypeByIndexId(indexIdMap.get(shadowIdxId)), null, sortKeyIdxes); } table.rebuildFullSchema(); } @VisibleForTesting public long getWatershedTxnId() { return watershedTxnId; } @VisibleForTesting public static void sendAgentTask(AgentBatchTask batchTask) { AgentTaskQueue.addBatchTask(batchTask); AgentTaskExecutor.submit(batchTask); } @VisibleForTesting public static void sendAgentTaskAndWait(AgentBatchTask batchTask, MarkedCountDownLatch<Long, Long> countDownLatch, long timeoutSeconds) throws AlterCancelException { AgentTaskQueue.addBatchTask(batchTask); AgentTaskExecutor.submit(batchTask); long timeout = 1000L * Math.min(timeoutSeconds, Config.max_create_table_timeout_second); boolean ok = false; try { ok = countDownLatch.await(timeout, TimeUnit.MILLISECONDS) && countDownLatch.getStatus().ok(); } catch (InterruptedException e) { LOG.warn("InterruptedException: ", e); } if (!ok) { AgentTaskQueue.removeBatchTask(batchTask, TTaskType.CREATE); String errMsg; if (!countDownLatch.getStatus().ok()) { errMsg = countDownLatch.getStatus().getErrorMsg(); } else { List<Map.Entry<Long, Long>> unfinishedMarks = countDownLatch.getLeftMarks(); List<Map.Entry<Long, Long>> subList = unfinishedMarks.subList(0, Math.min(unfinishedMarks.size(), 3)); errMsg = "Error tablets:" + Joiner.on(", ").join(subList); } throw new AlterCancelException("Create tablet failed. Error: " + errMsg); } } @VisibleForTesting public static void writeEditLog(LakeTableSchemaChangeJob job) { GlobalStateMgr.getCurrentState().getEditLog().logAlterJob(job); } @VisibleForTesting public static Future<Boolean> writeEditLogAsync(LakeTableSchemaChangeJob job) { return GlobalStateMgr.getCurrentState().getEditLog().logAlterJobNoWait(job); } @VisibleForTesting public static long getNextTransactionId() { return GlobalStateMgr.getCurrentGlobalTransactionMgr().getTransactionIDGenerator().getNextTransactionId(); } @Override @Override protected void runWaitingTxnJob() throws AlterCancelException { Preconditions.checkState(jobState == JobState.WAITING_TXN, jobState); try { if (!isPreviousLoadFinished(dbId, tableId, watershedTxnId)) { LOG.info("wait transactions before {} to be finished, schema change job: {}", watershedTxnId, jobId); return; } } catch (AnalysisException e) { throw new AlterCancelException(e.getMessage()); } LOG.info("previous transactions are all finished, begin to send schema change tasks. 
job: {}", jobId); try (ReadLockedDatabase db = getReadLockedDatabase(dbId)) { LakeTable table = getTableOrThrow(db, tableId); Preconditions.checkState(table.getState() == OlapTable.OlapTableState.SCHEMA_CHANGE); for (long partitionId : partitionIndexMap.rowKeySet()) { Partition partition = table.getPartition(partitionId); Preconditions.checkNotNull(partition, partitionId); long visibleVersion = partition.getVisibleVersion(); Map<Long, MaterializedIndex> shadowIndexMap = partitionIndexMap.row(partitionId); for (Map.Entry<Long, MaterializedIndex> entry : shadowIndexMap.entrySet()) { long shadowIdxId = entry.getKey(); MaterializedIndex shadowIdx = entry.getValue(); for (Tablet shadowTablet : shadowIdx.getTablets()) { Long backendId = Utils.chooseBackend((LakeTablet) shadowTablet); if (backendId == null) { throw new AlterCancelException("No alive backend"); } long shadowTabletId = shadowTablet.getId(); long originTabletId = partitionIndexTabletMap.row(partitionId).get(shadowIdxId).get(shadowTabletId); AlterReplicaTask alterTask = AlterReplicaTask.alterLakeTablet(backendId, dbId, tableId, partitionId, shadowIdxId, shadowTabletId, originTabletId, visibleVersion, jobId, watershedTxnId); getOrCreateSchemaChangeBatchTask().addTask(alterTask); } } partition.setMinRetainVersion(visibleVersion); } } sendAgentTask(getOrCreateSchemaChangeBatchTask()); this.jobState = JobState.RUNNING; if (span != null) { span.addEvent("setRunning"); } LOG.info("transfer schema change job {} state to {}", jobId, this.jobState); } @Override protected void runRunningJob() throws AlterCancelException { Preconditions.checkState(jobState == JobState.RUNNING, jobState); if (tableHasBeenDropped()) { throw new AlterCancelException("Table or database does not exist"); } if (!getOrCreateSchemaChangeBatchTask().isFinished()) { LOG.info("schema change tasks not finished. job: {}", jobId); List<AgentTask> tasks = getOrCreateSchemaChangeBatchTask().getUnfinishedTasks(2000); AgentTask task = tasks.stream().filter(t -> t.getFailedTimes() >= 3).findAny().orElse(null); if (task != null) { throw new AlterCancelException("schema change task failed after try three times: " + task.getErrorMsg()); } else { return; } } try (WriteLockedDatabase db = getWriteLockedDatabase(dbId)) { LakeTable table = getTableOrThrow(db, tableId); commitVersionMap = new HashMap<>(); for (long partitionId : partitionIndexMap.rowKeySet()) { Partition partition = table.getPartition(partitionId); Preconditions.checkNotNull(partition, partitionId); partition.setMinRetainVersion(0); long commitVersion = partition.getNextVersion(); commitVersionMap.put(partitionId, commitVersion); LOG.debug("commit version of partition {} is {}. jobId={}", partitionId, commitVersion, jobId); } this.jobState = JobState.FINISHED_REWRITING; writeEditLog(this); updateNextVersion(table); } if (span != null) { span.addEvent("finishedRewriting"); } LOG.info("schema change job finished rewriting historical data: {}", jobId); } @Override protected void runFinishedRewritingJob() throws AlterCancelException { Preconditions.checkState(jobState == JobState.FINISHED_REWRITING); if (!readyToPublishVersion()) { return; } if (!publishVersion()) { LOG.info("publish version failed, will retry later. jobId={}", jobId); return; } long startWriteTs; Future<Boolean> editLogFuture; Set<String> modifiedColumns; List<MaterializedIndex> droppedIndexes; try (WriteLockedDatabase db = getWriteLockedDatabase(dbId)) { LakeTable table = (db != null) ? 
db.getTable(tableId) : null; if (table == null) { LOG.info("database or table been dropped while doing schema change job {}", jobId); return; } modifiedColumns = collectModifiedColumnsForRelatedMVs(table); droppedIndexes = visualiseShadowIndex(table); try { GlobalStateMgr.getCurrentColocateIndex().updateLakeTableColocationInfo(table, true, null); } catch (DdlException e) { LOG.error("table {} update colocation info failed after schema change, {}.", tableId, e.getMessage()); } inactiveRelatedMv(modifiedColumns, table); table.onReload(); this.jobState = JobState.FINISHED; this.finishedTimeMs = System.currentTimeMillis(); startWriteTs = System.nanoTime(); editLogFuture = writeEditLogAsync(this); } EditLog.waitInfinity(startWriteTs, editLogFuture); for (MaterializedIndex droppedIndex : droppedIndexes) { List<Long> shards = droppedIndex.getTablets().stream().map(Tablet::getId).collect(Collectors.toList()); StarMgrMetaSyncer.dropTabletAndDeleteShard(shards, GlobalStateMgr.getCurrentStarOSAgent()); } if (span != null) { span.end(); } LOG.info("schema change job finished: {}", jobId); } boolean readyToPublishVersion() throws AlterCancelException { try (ReadLockedDatabase db = getReadLockedDatabase(dbId)) { LakeTable table = getTableOrThrow(db, tableId); for (long partitionId : partitionIndexMap.rowKeySet()) { Partition partition = table.getPartition(partitionId); Preconditions.checkState(partition != null, partitionId); long commitVersion = commitVersionMap.get(partitionId); if (commitVersion != partition.getVisibleVersion() + 1) { Preconditions.checkState(partition.getVisibleVersion() < commitVersion, "partition=" + partitionId + " visibleVersion=" + partition.getVisibleVersion() + " commitVersion=" + commitVersion); return false; } } } return true; } boolean publishVersion() { try { for (long partitionId : partitionIndexMap.rowKeySet()) { long commitVersion = commitVersionMap.get(partitionId); Map<Long, MaterializedIndex> shadowIndexMap = partitionIndexMap.row(partitionId); for (MaterializedIndex shadowIndex : shadowIndexMap.values()) { Utils.publishVersion(shadowIndex.getTablets(), watershedTxnId, 1, commitVersion); } } return true; } catch (Exception e) { LOG.error("Fail to publish version for schema change job {}: {}", jobId, e.getMessage()); return false; } } private Set<String> collectModifiedColumnsForRelatedMVs(@NotNull LakeTable tbl) { if (tbl.getRelatedMaterializedViews().isEmpty()) { return Sets.newHashSet(); } Set<String> modifiedColumns = Sets.newTreeSet(String.CASE_INSENSITIVE_ORDER); for (Map.Entry<Long, List<Column>> entry : indexSchemaMap.entrySet()) { Long shadowIdxId = entry.getKey(); long originIndexId = indexIdMap.get(shadowIdxId); List<Column> shadowSchema = entry.getValue(); List<Column> originSchema = tbl.getSchemaByIndexId(originIndexId); if (shadowSchema.size() == originSchema.size()) { for (Column col : shadowSchema) { if (col.isNameWithPrefix(SchemaChangeHandler.SHADOW_NAME_PRFIX)) { modifiedColumns.add(col.getNameWithoutPrefix(SchemaChangeHandler.SHADOW_NAME_PRFIX)); } } } else if (shadowSchema.size() < originSchema.size()) { List<Column> differences = originSchema.stream().filter(element -> !shadowSchema.contains(element)).collect(Collectors.toList()); Integer dropIdx = new Integer(originSchema.indexOf(differences.get(0))); modifiedColumns.add(originSchema.get(dropIdx).getName()); } else { } } return modifiedColumns; } private void inactiveRelatedMv(Set<String> modifiedColumns, @NotNull LakeTable tbl) { if (modifiedColumns.isEmpty()) { return; } Database db = 
GlobalStateMgr.getCurrentState().getDb(dbId); for (MvId mvId : tbl.getRelatedMaterializedViews()) { MaterializedView mv = (MaterializedView) db.getTable(mvId.getId()); if (mv == null) { LOG.warn("Ignore materialized view {} does not exists", mvId); continue; } for (Column mvColumn : mv.getColumns()) { if (modifiedColumns.contains(mvColumn.getName())) { LOG.warn("Setting the materialized view {}({}) to invalid because " + "the column {} of the table {} was modified.", mv.getName(), mv.getId(), mvColumn.getName(), tbl.getName()); mv.setInactiveAndReason( "base-table schema changed for columns: " + StringUtils.join(modifiedColumns, ",")); return; } } } } boolean tableHasBeenDropped() { try (ReadLockedDatabase db = getReadLockedDatabase(dbId)) { return db == null || db.getTable(tableId) == null; } } void updateNextVersion(@NotNull LakeTable table) { for (long partitionId : partitionIndexMap.rowKeySet()) { Partition partition = table.getPartition(partitionId); long commitVersion = commitVersionMap.get(partitionId); Preconditions.checkState(partition.getNextVersion() == commitVersion, "partitionNextVersion=" + partition.getNextVersion() + " commitVersion=" + commitVersion); partition.setNextVersion(commitVersion + 1); } } @NotNull LakeTable getTableOrThrow(@Nullable LockedDatabase db, long tableId) throws AlterCancelException { if (db == null) { throw new AlterCancelException("Database does not exist"); } LakeTable table = db.getTable(tableId); if (table == null) { throw new AlterCancelException("Table does not exist. tableId=" + tableId); } return table; } @Override public void replay(AlterJobV2 replayedJob) { LakeTableSchemaChangeJob other = (LakeTableSchemaChangeJob) replayedJob; LOG.info("Replaying lake table schema change job. state={} jobId={}", replayedJob.jobState, replayedJob.jobId); if (this != other) { Preconditions.checkState(this.type.equals(other.type)); Preconditions.checkState(this.jobId == other.jobId); Preconditions.checkState(this.dbId == other.dbId); Preconditions.checkState(this.tableId == other.tableId); this.jobState = other.jobState; this.createTimeMs = other.createTimeMs; this.finishedTimeMs = other.finishedTimeMs; this.errMsg = other.errMsg; this.timeoutMs = other.timeoutMs; this.partitionIndexTabletMap = other.partitionIndexTabletMap; this.partitionIndexMap = other.partitionIndexMap; this.indexIdMap = other.indexIdMap; this.indexIdToName = other.indexIdToName; this.indexSchemaMap = other.indexSchemaMap; this.indexShortKeyMap = other.indexShortKeyMap; this.hasBfChange = other.hasBfChange; this.bfColumns = other.bfColumns; this.bfFpp = other.bfFpp; this.indexChange = other.indexChange; this.indexes = other.indexes; this.watershedTxnId = other.watershedTxnId; this.startTime = other.startTime; this.commitVersionMap = other.commitVersionMap; } try (WriteLockedDatabase db = getWriteLockedDatabase(dbId)) { LakeTable table = (db != null) ? 
db.getTable(tableId) : null; if (table == null) { return; } if (jobState == JobState.PENDING) { addTabletToTabletInvertedIndex(table); table.setState(OlapTable.OlapTableState.SCHEMA_CHANGE); } else if (jobState == JobState.WAITING_TXN) { addShadowIndexToCatalog(table); } else if (jobState == JobState.FINISHED_REWRITING) { updateNextVersion(table); } else if (jobState == JobState.FINISHED) { table.onReload(); visualiseShadowIndex(table); } else if (jobState == JobState.CANCELLED) { removeShadowIndex(table); } else { throw new RuntimeException("unknown job state '{}'" + jobState.name()); } } } void addTabletToTabletInvertedIndex(@NotNull LakeTable table) { TabletInvertedIndex invertedIndex = GlobalStateMgr.getCurrentInvertedIndex(); for (Table.Cell<Long, Long, MaterializedIndex> cell : partitionIndexMap.cellSet()) { Long partitionId = cell.getRowKey(); Long shadowIndexId = cell.getColumnKey(); MaterializedIndex shadowIndex = cell.getValue(); assert partitionId != null; assert shadowIndexId != null; assert shadowIndex != null; TStorageMedium medium = table.getPartitionInfo().getDataProperty(partitionId).getStorageMedium(); TabletMeta shadowTabletMeta = new TabletMeta(dbId, tableId, partitionId, shadowIndexId, 0, medium, true); for (Tablet shadowTablet : shadowIndex.getTablets()) { invertedIndex.addTablet(shadowTablet.getId(), shadowTabletMeta); } } } void removeShadowIndex(@NotNull LakeTable table) { for (long partitionId : partitionIndexMap.rowKeySet()) { Partition partition = table.getPartition(partitionId); Preconditions.checkNotNull(partition, partitionId); partition.setMinRetainVersion(0); for (MaterializedIndex shadowIdx : partitionIndexMap.row(partitionId).values()) { partition.deleteRollupIndex(shadowIdx.getId()); } } for (String shadowIndexName : indexIdToName.values()) { table.deleteIndexInfo(shadowIndexName); } TabletInvertedIndex invertedIndex = GlobalStateMgr.getCurrentInvertedIndex(); for (long partitionId : partitionIndexMap.rowKeySet()) { for (MaterializedIndex shadowIdx : partitionIndexMap.row(partitionId).values()) { for (Tablet tablet : shadowIdx.getTablets()) { invertedIndex.deleteTablet(tablet.getId()); } } } table.setState(OlapTable.OlapTableState.NORMAL); } @NotNull List<MaterializedIndex> visualiseShadowIndex(@NotNull LakeTable table) { List<MaterializedIndex> droppedIndexes = new ArrayList<>(); TabletInvertedIndex invertedIndex = GlobalStateMgr.getCurrentInvertedIndex(); for (Column column : table.getColumns()) { if (Type.VARCHAR.equals(column.getType())) { IDictManager.getInstance().removeGlobalDict(table.getId(), column.getName()); } } for (Partition partition : table.getPartitions()) { Preconditions.checkState(commitVersionMap.containsKey(partition.getId())); long commitVersion = commitVersionMap.get(partition.getId()); LOG.debug("update partition visible version. partition=" + partition.getId() + " commitVersion=" + commitVersion); Preconditions.checkState(commitVersion == partition.getVisibleVersion() + 1, commitVersion + " vs " + partition.getVisibleVersion()); partition.setVisibleVersion(commitVersion, finishedTimeMs); LOG.debug("update visible version of partition {} to {}. 
jobId={}", partition.getId(), commitVersion, jobId); TStorageMedium medium = table.getPartitionInfo().getDataProperty(partition.getId()).getStorageMedium(); for (Map.Entry<Long, Long> entry : indexIdMap.entrySet()) { long shadowIdxId = entry.getKey(); long originIdxId = entry.getValue(); MaterializedIndex shadowIdx = partition.getIndex(shadowIdxId); Preconditions.checkNotNull(shadowIdx, shadowIdxId); MaterializedIndex droppedIdx; if (originIdxId == partition.getBaseIndex().getId()) { droppedIdx = partition.getBaseIndex(); } else { droppedIdx = partition.deleteRollupIndex(originIdxId); } Preconditions.checkNotNull(droppedIdx, originIdxId + " vs. " + shadowIdxId); TabletMeta shadowTabletMeta = new TabletMeta(dbId, tableId, partition.getId(), shadowIdxId, 0, medium, true); for (Tablet tablet : shadowIdx.getTablets()) { invertedIndex.addTablet(tablet.getId(), shadowTabletMeta); } partition.visualiseShadowIndex(shadowIdxId, originIdxId == partition.getBaseIndex().getId()); for (Tablet originTablet : droppedIdx.getTablets()) { invertedIndex.deleteTablet(originTablet.getId()); } droppedIndexes.add(droppedIdx); } } for (Map.Entry<Long, Long> entry : indexIdMap.entrySet()) { long shadowIdxId = entry.getKey(); long originIdxId = entry.getValue(); String shadowIdxName = table.getIndexNameById(shadowIdxId); String originIdxName = table.getIndexNameById(originIdxId); table.deleteIndexInfo(originIdxName); table.renameIndexForSchemaChange(shadowIdxName, originIdxName); table.renameColumnNamePrefix(shadowIdxId); if (originIdxId == table.getBaseIndexId()) { table.setBaseIndexId(shadowIdxId); } } table.rebuildFullSchema(); if (hasBfChange) { table.setBloomFilterInfo(bfColumns, bfFpp); } if (indexChange) { table.setIndexes(indexes); } table.setState(OlapTable.OlapTableState.NORMAL); return droppedIndexes; } @Override protected boolean cancelImpl(String errMsg) { if (jobState == JobState.CANCELLED || jobState == JobState.FINISHED) { return false; } if (jobState == JobState.FINISHED_REWRITING && tableExists()) { return false; } if (schemaChangeBatchTask != null) { AgentTaskQueue.removeBatchTask(schemaChangeBatchTask, TTaskType.ALTER); } try (WriteLockedDatabase db = getWriteLockedDatabase(dbId)) { LakeTable table = (db != null) ? 
db.getTable(tableId) : null; if (table != null) { removeShadowIndex(table); } } this.jobState = JobState.CANCELLED; this.errMsg = errMsg; this.finishedTimeMs = System.currentTimeMillis(); if (span != null) { span.setStatus(StatusCode.ERROR, errMsg); span.end(); } writeEditLog(this); return true; } AgentBatchTask getOrCreateSchemaChangeBatchTask() { if (schemaChangeBatchTask == null) { schemaChangeBatchTask = new AgentBatchTask(); } return schemaChangeBatchTask; } @Override protected void getInfo(List<List<Comparable>> infos) { String progress = FeConstants.NULL_STRING; if (jobState == JobState.RUNNING && schemaChangeBatchTask.getTaskNum() > 0) { progress = schemaChangeBatchTask.getFinishedTaskNum() + "/" + schemaChangeBatchTask.getTaskNum(); } for (Map.Entry<Long, Long> entry : indexIdMap.entrySet()) { long shadowIndexId = entry.getKey(); List<Comparable> info = Lists.newArrayList(); info.add(jobId); info.add(tableName); info.add(TimeUtils.longToTimeString(createTimeMs)); info.add(TimeUtils.longToTimeString(finishedTimeMs)); info.add(Column.removeNamePrefix(indexIdToName.get(shadowIndexId))); info.add(shadowIndexId); info.add(entry.getValue()); info.add("0:0"); info.add(watershedTxnId); info.add(jobState.name()); info.add(errMsg); info.add(progress); info.add(timeoutMs / 1000); infos.add(info); } } private boolean tableExists() { try (ReadLockedDatabase db = getReadLockedDatabase(dbId)) { return db != null && db.getTable(tableId) != null; } } @Override public void write(DataOutput out) throws IOException { String json = GsonUtils.GSON.toJson(this, AlterJobV2.class); Text.writeString(out, json); } @Nullable ReadLockedDatabase getReadLockedDatabase(long dbId) { Database db = GlobalStateMgr.getCurrentState().getDb(dbId); return db != null ? new ReadLockedDatabase(db) : null; } @Nullable WriteLockedDatabase getWriteLockedDatabase(long dbId) { Database db = GlobalStateMgr.getCurrentState().getDb(dbId); return db != null ? new WriteLockedDatabase(db) : null; } @VisibleForTesting public boolean isPreviousLoadFinished(long dbId, long tableId, long txnId) throws AnalysisException { GlobalTransactionMgr globalTxnMgr = GlobalStateMgr.getCurrentGlobalTransactionMgr(); return globalTxnMgr.isPreviousTransactionsFinished(txnId, dbId, Lists.newArrayList(tableId)); } private abstract static class LockedDatabase implements AutoCloseable { private final Database db; LockedDatabase(@NotNull Database db) { lock(db); this.db = db; } abstract void lock(Database db); abstract void unlock(Database db); @Nullable LakeTable getTable(long tableId) { return (LakeTable) db.getTable(tableId); } @Override public void close() { unlock(db); } } private static class ReadLockedDatabase extends LockedDatabase { ReadLockedDatabase(@NotNull Database db) { super(db); } @Override void lock(Database db) { db.readLock(); } @Override void unlock(Database db) { db.readUnlock(); } } private static class WriteLockedDatabase extends LockedDatabase { WriteLockedDatabase(@NotNull Database db) { super(db); } @Override void lock(Database db) { db.writeLock(); } @Override void unlock(Database db) { db.writeUnlock(); } } @Override public Optional<Long> getTransactionId() { return watershedTxnId < 0 ? Optional.empty() : Optional.of(watershedTxnId); } }
class LakeTableSchemaChangeJob extends AlterJobV2 { private static final Logger LOG = LogManager.getLogger(LakeTableSchemaChangeJob.class); @SerializedName(value = "partitionIndexTabletMap") private Table<Long, Long, Map<Long, Long>> partitionIndexTabletMap = HashBasedTable.create(); @SerializedName(value = "partitionIndexMap") private Table<Long, Long, MaterializedIndex> partitionIndexMap = HashBasedTable.create(); @SerializedName(value = "indexIdMap") private Map<Long, Long> indexIdMap = Maps.newHashMap(); @SerializedName(value = "indexIdToName") private Map<Long, String> indexIdToName = Maps.newHashMap(); @SerializedName(value = "indexSchemaMap") private Map<Long, List<Column>> indexSchemaMap = Maps.newHashMap(); @SerializedName(value = "indexShortKeyMap") private Map<Long, Short> indexShortKeyMap = Maps.newHashMap(); @SerializedName(value = "hasBfChange") private boolean hasBfChange; @SerializedName(value = "bfColumns") private Set<String> bfColumns = null; @SerializedName(value = "bfFpp") private double bfFpp = 0; @SerializedName(value = "indexChange") private boolean indexChange = false; @SerializedName(value = "indexes") private List<Index> indexes = null; @SerializedName(value = "watershedTxnId") protected long watershedTxnId = -1; @SerializedName(value = "startTime") private long startTime; @SerializedName(value = "commitVersionMap") private Map<Long, Long> commitVersionMap; @SerializedName(value = "sortKeyIdxes") private List<Integer> sortKeyIdxes; private AgentBatchTask schemaChangeBatchTask = new AgentBatchTask(); public LakeTableSchemaChangeJob(long jobId, long dbId, long tableId, String tableName, long timeoutMs) { super(jobId, JobType.SCHEMA_CHANGE, dbId, tableId, tableName, timeoutMs); } void setBloomFilterInfo(boolean hasBfChange, Set<String> bfColumns, double bfFpp) { this.hasBfChange = hasBfChange; this.bfColumns = bfColumns; this.bfFpp = bfFpp; } void setAlterIndexInfo(boolean indexChange, List<Index> indexes) { this.indexChange = indexChange; this.indexes = indexes; } void setStartTime(long startTime) { this.startTime = startTime; } void setSortKeyIdxes(List<Integer> sortKeyIdxes) { this.sortKeyIdxes = sortKeyIdxes; } void addTabletIdMap(long partitionId, long shadowIdxId, long shadowTabletId, long originTabletId) { Map<Long, Long> tabletMap = partitionIndexTabletMap.get(partitionId, shadowIdxId); if (tabletMap == null) { tabletMap = Maps.newHashMap(); partitionIndexTabletMap.put(partitionId, shadowIdxId, tabletMap); } tabletMap.put(shadowTabletId, originTabletId); } void addPartitionShadowIndex(long partitionId, long shadowIdxId, MaterializedIndex shadowIdx) { partitionIndexMap.put(partitionId, shadowIdxId, shadowIdx); } void addIndexSchema(long shadowIdxId, long originIdxId, @NotNull String shadowIndexName, short shadowIdxShortKeyCount, @NotNull List<Column> shadowIdxSchema) { indexIdMap.put(shadowIdxId, originIdxId); indexIdToName.put(shadowIdxId, shadowIndexName); indexShortKeyMap.put(shadowIdxId, shadowIdxShortKeyCount); indexSchemaMap.put(shadowIdxId, shadowIdxSchema); } void addShadowIndexToCatalog(@NotNull LakeTable table) { Preconditions.checkState(watershedTxnId != -1); for (long partitionId : partitionIndexMap.rowKeySet()) { Partition partition = table.getPartition(partitionId); Preconditions.checkState(partition != null); Map<Long, MaterializedIndex> shadowIndexMap = partitionIndexMap.row(partitionId); for (MaterializedIndex shadowIndex : shadowIndexMap.values()) { shadowIndex.setVisibleTxnId(watershedTxnId); Preconditions.checkState(shadowIndex.getState() 
== MaterializedIndex.IndexState.SHADOW, shadowIndex.getState()); partition.createRollupIndex(shadowIndex); } } for (long shadowIdxId : indexIdMap.keySet()) { table.setIndexMeta(shadowIdxId, indexIdToName.get(shadowIdxId), indexSchemaMap.get(shadowIdxId), 0, 0, indexShortKeyMap.get(shadowIdxId), TStorageType.COLUMN, table.getKeysTypeByIndexId(indexIdMap.get(shadowIdxId)), null, sortKeyIdxes); } table.rebuildFullSchema(); } @VisibleForTesting public long getWatershedTxnId() { return watershedTxnId; } @VisibleForTesting public static void sendAgentTask(AgentBatchTask batchTask) { AgentTaskQueue.addBatchTask(batchTask); AgentTaskExecutor.submit(batchTask); } @VisibleForTesting public static void sendAgentTaskAndWait(AgentBatchTask batchTask, MarkedCountDownLatch<Long, Long> countDownLatch, long timeoutSeconds) throws AlterCancelException { AgentTaskQueue.addBatchTask(batchTask); AgentTaskExecutor.submit(batchTask); long timeout = 1000L * Math.min(timeoutSeconds, Config.max_create_table_timeout_second); boolean ok = false; try { ok = countDownLatch.await(timeout, TimeUnit.MILLISECONDS) && countDownLatch.getStatus().ok(); } catch (InterruptedException e) { LOG.warn("InterruptedException: ", e); } if (!ok) { AgentTaskQueue.removeBatchTask(batchTask, TTaskType.CREATE); String errMsg; if (!countDownLatch.getStatus().ok()) { errMsg = countDownLatch.getStatus().getErrorMsg(); } else { List<Map.Entry<Long, Long>> unfinishedMarks = countDownLatch.getLeftMarks(); List<Map.Entry<Long, Long>> subList = unfinishedMarks.subList(0, Math.min(unfinishedMarks.size(), 3)); errMsg = "Error tablets:" + Joiner.on(", ").join(subList); } throw new AlterCancelException("Create tablet failed. Error: " + errMsg); } } @VisibleForTesting public static void writeEditLog(LakeTableSchemaChangeJob job) { GlobalStateMgr.getCurrentState().getEditLog().logAlterJob(job); } @VisibleForTesting public static Future<Boolean> writeEditLogAsync(LakeTableSchemaChangeJob job) { return GlobalStateMgr.getCurrentState().getEditLog().logAlterJobNoWait(job); } @VisibleForTesting public static long getNextTransactionId() { return GlobalStateMgr.getCurrentGlobalTransactionMgr().getTransactionIDGenerator().getNextTransactionId(); } @Override @Override protected void runWaitingTxnJob() throws AlterCancelException { Preconditions.checkState(jobState == JobState.WAITING_TXN, jobState); try { if (!isPreviousLoadFinished(dbId, tableId, watershedTxnId)) { LOG.info("wait transactions before {} to be finished, schema change job: {}", watershedTxnId, jobId); return; } } catch (AnalysisException e) { throw new AlterCancelException(e.getMessage()); } LOG.info("previous transactions are all finished, begin to send schema change tasks. 
job: {}", jobId); try (ReadLockedDatabase db = getReadLockedDatabase(dbId)) { LakeTable table = getTableOrThrow(db, tableId); Preconditions.checkState(table.getState() == OlapTable.OlapTableState.SCHEMA_CHANGE); for (long partitionId : partitionIndexMap.rowKeySet()) { Partition partition = table.getPartition(partitionId); Preconditions.checkNotNull(partition, partitionId); long visibleVersion = partition.getVisibleVersion(); Map<Long, MaterializedIndex> shadowIndexMap = partitionIndexMap.row(partitionId); for (Map.Entry<Long, MaterializedIndex> entry : shadowIndexMap.entrySet()) { long shadowIdxId = entry.getKey(); MaterializedIndex shadowIdx = entry.getValue(); for (Tablet shadowTablet : shadowIdx.getTablets()) { Long backendId = Utils.chooseBackend((LakeTablet) shadowTablet); if (backendId == null) { throw new AlterCancelException("No alive backend"); } long shadowTabletId = shadowTablet.getId(); long originTabletId = partitionIndexTabletMap.row(partitionId).get(shadowIdxId).get(shadowTabletId); AlterReplicaTask alterTask = AlterReplicaTask.alterLakeTablet(backendId, dbId, tableId, partitionId, shadowIdxId, shadowTabletId, originTabletId, visibleVersion, jobId, watershedTxnId); getOrCreateSchemaChangeBatchTask().addTask(alterTask); } } partition.setMinRetainVersion(visibleVersion); } } sendAgentTask(getOrCreateSchemaChangeBatchTask()); this.jobState = JobState.RUNNING; if (span != null) { span.addEvent("setRunning"); } LOG.info("transfer schema change job {} state to {}", jobId, this.jobState); } @Override protected void runRunningJob() throws AlterCancelException { Preconditions.checkState(jobState == JobState.RUNNING, jobState); if (tableHasBeenDropped()) { throw new AlterCancelException("Table or database does not exist"); } if (!getOrCreateSchemaChangeBatchTask().isFinished()) { LOG.info("schema change tasks not finished. job: {}", jobId); List<AgentTask> tasks = getOrCreateSchemaChangeBatchTask().getUnfinishedTasks(2000); AgentTask task = tasks.stream().filter(t -> t.getFailedTimes() >= 3).findAny().orElse(null); if (task != null) { throw new AlterCancelException("schema change task failed after try three times: " + task.getErrorMsg()); } else { return; } } try (WriteLockedDatabase db = getWriteLockedDatabase(dbId)) { LakeTable table = getTableOrThrow(db, tableId); commitVersionMap = new HashMap<>(); for (long partitionId : partitionIndexMap.rowKeySet()) { Partition partition = table.getPartition(partitionId); Preconditions.checkNotNull(partition, partitionId); partition.setMinRetainVersion(0); long commitVersion = partition.getNextVersion(); commitVersionMap.put(partitionId, commitVersion); LOG.debug("commit version of partition {} is {}. jobId={}", partitionId, commitVersion, jobId); } this.jobState = JobState.FINISHED_REWRITING; writeEditLog(this); updateNextVersion(table); } if (span != null) { span.addEvent("finishedRewriting"); } LOG.info("schema change job finished rewriting historical data: {}", jobId); } @Override protected void runFinishedRewritingJob() throws AlterCancelException { Preconditions.checkState(jobState == JobState.FINISHED_REWRITING); if (!readyToPublishVersion()) { return; } if (!publishVersion()) { LOG.info("publish version failed, will retry later. jobId={}", jobId); return; } long startWriteTs; Future<Boolean> editLogFuture; Set<String> modifiedColumns; List<MaterializedIndex> droppedIndexes; try (WriteLockedDatabase db = getWriteLockedDatabase(dbId)) { LakeTable table = (db != null) ? 
db.getTable(tableId) : null; if (table == null) { LOG.info("database or table been dropped while doing schema change job {}", jobId); return; } modifiedColumns = collectModifiedColumnsForRelatedMVs(table); droppedIndexes = visualiseShadowIndex(table); try { GlobalStateMgr.getCurrentColocateIndex().updateLakeTableColocationInfo(table, true, null); } catch (DdlException e) { LOG.error("table {} update colocation info failed after schema change, {}.", tableId, e.getMessage()); } inactiveRelatedMv(modifiedColumns, table); table.onReload(); this.jobState = JobState.FINISHED; this.finishedTimeMs = System.currentTimeMillis(); startWriteTs = System.nanoTime(); editLogFuture = writeEditLogAsync(this); } EditLog.waitInfinity(startWriteTs, editLogFuture); for (MaterializedIndex droppedIndex : droppedIndexes) { List<Long> shards = droppedIndex.getTablets().stream().map(Tablet::getId).collect(Collectors.toList()); StarMgrMetaSyncer.dropTabletAndDeleteShard(shards, GlobalStateMgr.getCurrentStarOSAgent()); } if (span != null) { span.end(); } LOG.info("schema change job finished: {}", jobId); } boolean readyToPublishVersion() throws AlterCancelException { try (ReadLockedDatabase db = getReadLockedDatabase(dbId)) { LakeTable table = getTableOrThrow(db, tableId); for (long partitionId : partitionIndexMap.rowKeySet()) { Partition partition = table.getPartition(partitionId); Preconditions.checkState(partition != null, partitionId); long commitVersion = commitVersionMap.get(partitionId); if (commitVersion != partition.getVisibleVersion() + 1) { Preconditions.checkState(partition.getVisibleVersion() < commitVersion, "partition=" + partitionId + " visibleVersion=" + partition.getVisibleVersion() + " commitVersion=" + commitVersion); return false; } } } return true; } boolean publishVersion() { try { for (long partitionId : partitionIndexMap.rowKeySet()) { long commitVersion = commitVersionMap.get(partitionId); Map<Long, MaterializedIndex> shadowIndexMap = partitionIndexMap.row(partitionId); for (MaterializedIndex shadowIndex : shadowIndexMap.values()) { Utils.publishVersion(shadowIndex.getTablets(), watershedTxnId, 1, commitVersion); } } return true; } catch (Exception e) { LOG.error("Fail to publish version for schema change job {}: {}", jobId, e.getMessage()); return false; } } private Set<String> collectModifiedColumnsForRelatedMVs(@NotNull LakeTable tbl) { if (tbl.getRelatedMaterializedViews().isEmpty()) { return Sets.newHashSet(); } Set<String> modifiedColumns = Sets.newTreeSet(String.CASE_INSENSITIVE_ORDER); for (Map.Entry<Long, List<Column>> entry : indexSchemaMap.entrySet()) { Long shadowIdxId = entry.getKey(); long originIndexId = indexIdMap.get(shadowIdxId); List<Column> shadowSchema = entry.getValue(); List<Column> originSchema = tbl.getSchemaByIndexId(originIndexId); if (shadowSchema.size() == originSchema.size()) { for (Column col : shadowSchema) { if (col.isNameWithPrefix(SchemaChangeHandler.SHADOW_NAME_PRFIX)) { modifiedColumns.add(col.getNameWithoutPrefix(SchemaChangeHandler.SHADOW_NAME_PRFIX)); } } } else if (shadowSchema.size() < originSchema.size()) { List<Column> differences = originSchema.stream().filter(element -> !shadowSchema.contains(element)).collect(Collectors.toList()); Integer dropIdx = new Integer(originSchema.indexOf(differences.get(0))); modifiedColumns.add(originSchema.get(dropIdx).getName()); } else { } } return modifiedColumns; } private void inactiveRelatedMv(Set<String> modifiedColumns, @NotNull LakeTable tbl) { if (modifiedColumns.isEmpty()) { return; } Database db = 
GlobalStateMgr.getCurrentState().getDb(dbId); for (MvId mvId : tbl.getRelatedMaterializedViews()) { MaterializedView mv = (MaterializedView) db.getTable(mvId.getId()); if (mv == null) { LOG.warn("Ignore materialized view {} does not exists", mvId); continue; } for (Column mvColumn : mv.getColumns()) { if (modifiedColumns.contains(mvColumn.getName())) { LOG.warn("Setting the materialized view {}({}) to invalid because " + "the column {} of the table {} was modified.", mv.getName(), mv.getId(), mvColumn.getName(), tbl.getName()); mv.setInactiveAndReason( "base-table schema changed for columns: " + StringUtils.join(modifiedColumns, ",")); return; } } } } boolean tableHasBeenDropped() { try (ReadLockedDatabase db = getReadLockedDatabase(dbId)) { return db == null || db.getTable(tableId) == null; } } void updateNextVersion(@NotNull LakeTable table) { for (long partitionId : partitionIndexMap.rowKeySet()) { Partition partition = table.getPartition(partitionId); long commitVersion = commitVersionMap.get(partitionId); Preconditions.checkState(partition.getNextVersion() == commitVersion, "partitionNextVersion=" + partition.getNextVersion() + " commitVersion=" + commitVersion); partition.setNextVersion(commitVersion + 1); } } @NotNull LakeTable getTableOrThrow(@Nullable LockedDatabase db, long tableId) throws AlterCancelException { if (db == null) { throw new AlterCancelException("Database does not exist"); } LakeTable table = db.getTable(tableId); if (table == null) { throw new AlterCancelException("Table does not exist. tableId=" + tableId); } return table; } @Override public void replay(AlterJobV2 replayedJob) { LakeTableSchemaChangeJob other = (LakeTableSchemaChangeJob) replayedJob; LOG.info("Replaying lake table schema change job. state={} jobId={}", replayedJob.jobState, replayedJob.jobId); if (this != other) { Preconditions.checkState(this.type.equals(other.type)); Preconditions.checkState(this.jobId == other.jobId); Preconditions.checkState(this.dbId == other.dbId); Preconditions.checkState(this.tableId == other.tableId); this.jobState = other.jobState; this.createTimeMs = other.createTimeMs; this.finishedTimeMs = other.finishedTimeMs; this.errMsg = other.errMsg; this.timeoutMs = other.timeoutMs; this.partitionIndexTabletMap = other.partitionIndexTabletMap; this.partitionIndexMap = other.partitionIndexMap; this.indexIdMap = other.indexIdMap; this.indexIdToName = other.indexIdToName; this.indexSchemaMap = other.indexSchemaMap; this.indexShortKeyMap = other.indexShortKeyMap; this.hasBfChange = other.hasBfChange; this.bfColumns = other.bfColumns; this.bfFpp = other.bfFpp; this.indexChange = other.indexChange; this.indexes = other.indexes; this.watershedTxnId = other.watershedTxnId; this.startTime = other.startTime; this.commitVersionMap = other.commitVersionMap; } try (WriteLockedDatabase db = getWriteLockedDatabase(dbId)) { LakeTable table = (db != null) ? 
db.getTable(tableId) : null; if (table == null) { return; } if (jobState == JobState.PENDING) { addTabletToTabletInvertedIndex(table); table.setState(OlapTable.OlapTableState.SCHEMA_CHANGE); } else if (jobState == JobState.WAITING_TXN) { addShadowIndexToCatalog(table); } else if (jobState == JobState.FINISHED_REWRITING) { updateNextVersion(table); } else if (jobState == JobState.FINISHED) { table.onReload(); visualiseShadowIndex(table); } else if (jobState == JobState.CANCELLED) { removeShadowIndex(table); } else { throw new RuntimeException("unknown job state '{}'" + jobState.name()); } } } void addTabletToTabletInvertedIndex(@NotNull LakeTable table) { TabletInvertedIndex invertedIndex = GlobalStateMgr.getCurrentInvertedIndex(); for (Table.Cell<Long, Long, MaterializedIndex> cell : partitionIndexMap.cellSet()) { Long partitionId = cell.getRowKey(); Long shadowIndexId = cell.getColumnKey(); MaterializedIndex shadowIndex = cell.getValue(); assert partitionId != null; assert shadowIndexId != null; assert shadowIndex != null; TStorageMedium medium = table.getPartitionInfo().getDataProperty(partitionId).getStorageMedium(); TabletMeta shadowTabletMeta = new TabletMeta(dbId, tableId, partitionId, shadowIndexId, 0, medium, true); for (Tablet shadowTablet : shadowIndex.getTablets()) { invertedIndex.addTablet(shadowTablet.getId(), shadowTabletMeta); } } } void removeShadowIndex(@NotNull LakeTable table) { for (long partitionId : partitionIndexMap.rowKeySet()) { Partition partition = table.getPartition(partitionId); Preconditions.checkNotNull(partition, partitionId); partition.setMinRetainVersion(0); for (MaterializedIndex shadowIdx : partitionIndexMap.row(partitionId).values()) { partition.deleteRollupIndex(shadowIdx.getId()); } } for (String shadowIndexName : indexIdToName.values()) { table.deleteIndexInfo(shadowIndexName); } TabletInvertedIndex invertedIndex = GlobalStateMgr.getCurrentInvertedIndex(); for (long partitionId : partitionIndexMap.rowKeySet()) { for (MaterializedIndex shadowIdx : partitionIndexMap.row(partitionId).values()) { for (Tablet tablet : shadowIdx.getTablets()) { invertedIndex.deleteTablet(tablet.getId()); } } } table.setState(OlapTable.OlapTableState.NORMAL); } @NotNull List<MaterializedIndex> visualiseShadowIndex(@NotNull LakeTable table) { List<MaterializedIndex> droppedIndexes = new ArrayList<>(); TabletInvertedIndex invertedIndex = GlobalStateMgr.getCurrentInvertedIndex(); for (Column column : table.getColumns()) { if (Type.VARCHAR.equals(column.getType())) { IDictManager.getInstance().removeGlobalDict(table.getId(), column.getName()); } } for (Partition partition : table.getPartitions()) { Preconditions.checkState(commitVersionMap.containsKey(partition.getId())); long commitVersion = commitVersionMap.get(partition.getId()); LOG.debug("update partition visible version. partition=" + partition.getId() + " commitVersion=" + commitVersion); Preconditions.checkState(commitVersion == partition.getVisibleVersion() + 1, commitVersion + " vs " + partition.getVisibleVersion()); partition.setVisibleVersion(commitVersion, finishedTimeMs); LOG.debug("update visible version of partition {} to {}. 
jobId={}", partition.getId(), commitVersion, jobId); TStorageMedium medium = table.getPartitionInfo().getDataProperty(partition.getId()).getStorageMedium(); for (Map.Entry<Long, Long> entry : indexIdMap.entrySet()) { long shadowIdxId = entry.getKey(); long originIdxId = entry.getValue(); MaterializedIndex shadowIdx = partition.getIndex(shadowIdxId); Preconditions.checkNotNull(shadowIdx, shadowIdxId); MaterializedIndex droppedIdx; if (originIdxId == partition.getBaseIndex().getId()) { droppedIdx = partition.getBaseIndex(); } else { droppedIdx = partition.deleteRollupIndex(originIdxId); } Preconditions.checkNotNull(droppedIdx, originIdxId + " vs. " + shadowIdxId); TabletMeta shadowTabletMeta = new TabletMeta(dbId, tableId, partition.getId(), shadowIdxId, 0, medium, true); for (Tablet tablet : shadowIdx.getTablets()) { invertedIndex.addTablet(tablet.getId(), shadowTabletMeta); } partition.visualiseShadowIndex(shadowIdxId, originIdxId == partition.getBaseIndex().getId()); for (Tablet originTablet : droppedIdx.getTablets()) { invertedIndex.deleteTablet(originTablet.getId()); } droppedIndexes.add(droppedIdx); } } for (Map.Entry<Long, Long> entry : indexIdMap.entrySet()) { long shadowIdxId = entry.getKey(); long originIdxId = entry.getValue(); String shadowIdxName = table.getIndexNameById(shadowIdxId); String originIdxName = table.getIndexNameById(originIdxId); table.deleteIndexInfo(originIdxName); table.renameIndexForSchemaChange(shadowIdxName, originIdxName); table.renameColumnNamePrefix(shadowIdxId); if (originIdxId == table.getBaseIndexId()) { table.setBaseIndexId(shadowIdxId); } } table.rebuildFullSchema(); if (hasBfChange) { table.setBloomFilterInfo(bfColumns, bfFpp); } if (indexChange) { table.setIndexes(indexes); } table.setState(OlapTable.OlapTableState.NORMAL); return droppedIndexes; } @Override protected boolean cancelImpl(String errMsg) { if (jobState == JobState.CANCELLED || jobState == JobState.FINISHED) { return false; } if (jobState == JobState.FINISHED_REWRITING && tableExists()) { return false; } if (schemaChangeBatchTask != null) { AgentTaskQueue.removeBatchTask(schemaChangeBatchTask, TTaskType.ALTER); } try (WriteLockedDatabase db = getWriteLockedDatabase(dbId)) { LakeTable table = (db != null) ? 
db.getTable(tableId) : null; if (table != null) { removeShadowIndex(table); } } this.jobState = JobState.CANCELLED; this.errMsg = errMsg; this.finishedTimeMs = System.currentTimeMillis(); if (span != null) { span.setStatus(StatusCode.ERROR, errMsg); span.end(); } writeEditLog(this); return true; } AgentBatchTask getOrCreateSchemaChangeBatchTask() { if (schemaChangeBatchTask == null) { schemaChangeBatchTask = new AgentBatchTask(); } return schemaChangeBatchTask; } @Override protected void getInfo(List<List<Comparable>> infos) { String progress = FeConstants.NULL_STRING; if (jobState == JobState.RUNNING && schemaChangeBatchTask.getTaskNum() > 0) { progress = schemaChangeBatchTask.getFinishedTaskNum() + "/" + schemaChangeBatchTask.getTaskNum(); } for (Map.Entry<Long, Long> entry : indexIdMap.entrySet()) { long shadowIndexId = entry.getKey(); List<Comparable> info = Lists.newArrayList(); info.add(jobId); info.add(tableName); info.add(TimeUtils.longToTimeString(createTimeMs)); info.add(TimeUtils.longToTimeString(finishedTimeMs)); info.add(Column.removeNamePrefix(indexIdToName.get(shadowIndexId))); info.add(shadowIndexId); info.add(entry.getValue()); info.add("0:0"); info.add(watershedTxnId); info.add(jobState.name()); info.add(errMsg); info.add(progress); info.add(timeoutMs / 1000); infos.add(info); } } private boolean tableExists() { try (ReadLockedDatabase db = getReadLockedDatabase(dbId)) { return db != null && db.getTable(tableId) != null; } } @Override public void write(DataOutput out) throws IOException { String json = GsonUtils.GSON.toJson(this, AlterJobV2.class); Text.writeString(out, json); } @Nullable ReadLockedDatabase getReadLockedDatabase(long dbId) { Database db = GlobalStateMgr.getCurrentState().getDb(dbId); return db != null ? new ReadLockedDatabase(db) : null; } @Nullable WriteLockedDatabase getWriteLockedDatabase(long dbId) { Database db = GlobalStateMgr.getCurrentState().getDb(dbId); return db != null ? new WriteLockedDatabase(db) : null; } @VisibleForTesting public boolean isPreviousLoadFinished(long dbId, long tableId, long txnId) throws AnalysisException { GlobalTransactionMgr globalTxnMgr = GlobalStateMgr.getCurrentGlobalTransactionMgr(); return globalTxnMgr.isPreviousTransactionsFinished(txnId, dbId, Lists.newArrayList(tableId)); } private abstract static class LockedDatabase implements AutoCloseable { private final Database db; LockedDatabase(@NotNull Database db) { lock(db); this.db = db; } abstract void lock(Database db); abstract void unlock(Database db); @Nullable LakeTable getTable(long tableId) { return (LakeTable) db.getTable(tableId); } @Override public void close() { unlock(db); } } private static class ReadLockedDatabase extends LockedDatabase { ReadLockedDatabase(@NotNull Database db) { super(db); } @Override void lock(Database db) { db.readLock(); } @Override void unlock(Database db) { db.readUnlock(); } } private static class WriteLockedDatabase extends LockedDatabase { WriteLockedDatabase(@NotNull Database db) { super(db); } @Override void lock(Database db) { db.writeLock(); } @Override void unlock(Database db) { db.writeUnlock(); } } @Override public Optional<Long> getTransactionId() { return watershedTxnId < 0 ? Optional.empty() : Optional.of(watershedTxnId); } }
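The LakeTableSchemaChangeJob context above relies on the try-with-resources lock-guard idiom (ReadLockedDatabase / WriteLockedDatabase implementing AutoCloseable) so that database locks are released even when the body throws an AlterCancelException. Below is a minimal self-contained sketch of that idiom; the Database class here is a hypothetical stand-in backed by a plain ReentrantReadWriteLock, not the real catalog class.

import java.util.concurrent.locks.ReentrantReadWriteLock;

class Database {
    private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
    void readLock() { lock.readLock().lock(); }
    void readUnlock() { lock.readLock().unlock(); }
}

class ReadLockedDatabase implements AutoCloseable {
    private final Database db;
    // Acquire the lock in the constructor, exactly like the guard above.
    ReadLockedDatabase(Database db) { db.readLock(); this.db = db; }
    @Override
    public void close() { db.readUnlock(); }
}

public class LockGuardDemo {
    public static void main(String[] args) {
        Database db = new Database();
        // The read lock is released automatically when the try block exits,
        // even if the body throws.
        try (ReadLockedDatabase guard = new ReadLockedDatabase(db)) {
            System.out.println("holding read lock");
        }
    }
}

The design choice mirrors the job code: acquiring in the constructor and releasing in close() makes it impossible to forget the unlock on an early return or exception path.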
If the job is cancelled by the user, the `clearJob()` will be called twice. Set these strings to empty, to ensure the idempotency of the `clearJob()` function.
private void clearJob() { Preconditions.checkState(state == JobState.FINISHED || state == JobState.CANCELLED); LOG.debug("kill etl job and delete etl files. id: {}, state: {}", id, state); SparkEtlJobHandler handler = new SparkEtlJobHandler(); if (state == JobState.CANCELLED) { if ((!Strings.isNullOrEmpty(appId) && sparkResource.isYarnMaster()) || sparkLoadAppHandle != null) { try { handler.killEtlJob(sparkLoadAppHandle, appId, id, sparkResource); } catch (Exception e) { LOG.warn("kill etl job failed. id: {}, state: {}", id, state, e); } } } if (!Strings.isNullOrEmpty(etlOutputPath)) { try { String outputPath = etlOutputPath.substring(0, etlOutputPath.lastIndexOf("/")); handler.deleteEtlOutputPath(outputPath, brokerDesc); } catch (Exception e) { LOG.warn("delete etl files failed. id: {}, state: {}", id, state, e); } } LOG.debug("clear push tasks and infos that not persist. id: {}, state: {}", id, state); writeLock(); try { for (Map<Long, PushTask> sentReplicaPushTask : tabletToSentReplicaPushTask.values()) { for (PushTask pushTask : sentReplicaPushTask.values()) { if (pushTask == null) { continue; } AgentTaskQueue.removeTask(pushTask.getBackendId(), pushTask.getTaskType(), pushTask.getSignature()); } } sparkLoadAppHandle = null; resourceDesc = null; etlOutputPath = ""; appId = ""; tableToLoadPartitions.clear(); indexToPushBrokerReaderParams.clear(); indexToSchemaHash.clear(); tabletToSentReplicaPushTask.clear(); finishedReplicas.clear(); quorumTablets.clear(); fullTablets.clear(); } finally { writeUnlock(); } }
appId = "";
private void clearJob() { Preconditions.checkState(state == JobState.FINISHED || state == JobState.CANCELLED); LOG.debug("kill etl job and delete etl files. id: {}, state: {}", id, state); SparkEtlJobHandler handler = new SparkEtlJobHandler(); if (state == JobState.CANCELLED) { if ((!Strings.isNullOrEmpty(appId) && sparkResource.isYarnMaster()) || sparkLoadAppHandle != null) { try { handler.killEtlJob(sparkLoadAppHandle, appId, id, sparkResource); } catch (Exception e) { LOG.warn("kill etl job failed. id: {}, state: {}", id, state, e); } } } if (!Strings.isNullOrEmpty(etlOutputPath)) { try { String outputPath = etlOutputPath.substring(0, etlOutputPath.lastIndexOf("/")); handler.deleteEtlOutputPath(outputPath, brokerDesc); } catch (Exception e) { LOG.warn("delete etl files failed. id: {}, state: {}", id, state, e); } } LOG.debug("clear push tasks and infos that not persist. id: {}, state: {}", id, state); writeLock(); try { for (Map<Long, PushTask> sentReplicaPushTask : tabletToSentReplicaPushTask.values()) { for (PushTask pushTask : sentReplicaPushTask.values()) { if (pushTask == null) { continue; } AgentTaskQueue.removeTask(pushTask.getBackendId(), pushTask.getTaskType(), pushTask.getSignature()); } } sparkLoadAppHandle = null; resourceDesc = null; etlOutputPath = ""; appId = ""; tableToLoadPartitions.clear(); indexToPushBrokerReaderParams.clear(); indexToSchemaHash.clear(); tabletToSentReplicaPushTask.clear(); finishedReplicas.clear(); quorumTablets.clear(); fullTablets.clear(); } finally { writeUnlock(); } }
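The review comment above asks that the string fields be reset to empty precisely because `clearJob()` can run a second time after a user cancel: the cleanup branches are guarded by `Strings.isNullOrEmpty(...)`, so once `appId` and `etlOutputPath` are blanked, a repeat call skips the kill/delete work instead of re-issuing it. A hedged, self-contained illustration of that idempotency property follows; the field values and println side effects are stand-ins for the real handler calls.

public class ClearJobIdempotencyDemo {
    private String appId = "app_123";
    private String etlOutputPath = "hdfs://warehouse/etl/output";

    void clearJob() {
        // Guarded cleanup: only runs while the fields still hold real values.
        if (appId != null && !appId.isEmpty()) {
            System.out.println("killing etl job " + appId);
        }
        if (etlOutputPath != null && !etlOutputPath.isEmpty()) {
            System.out.println("deleting " + etlOutputPath);
        }
        // Resetting the fields is what makes a second call a no-op.
        appId = "";
        etlOutputPath = "";
    }

    public static void main(String[] args) {
        ClearJobIdempotencyDemo job = new ClearJobIdempotencyDemo();
        job.clearJob(); // performs cleanup
        job.clearJob(); // safe: both guards are now false
    }
}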
class SparkLoadJob extends BulkLoadJob { private static final Logger LOG = LogManager.getLogger(SparkLoadJob.class); private SparkResource sparkResource; private long etlStartTimestamp = -1; private String appId = ""; private String etlOutputPath = ""; private Map<String, Pair<String, Long>> tabletMetaToFileInfo = Maps.newHashMap(); private ResourceDesc resourceDesc; private SparkLoadAppHandle sparkLoadAppHandle = new SparkLoadAppHandle(); private long quorumFinishTimestamp = -1; private Map<Long, Set<Long>> tableToLoadPartitions = Maps.newHashMap(); private Map<Long, PushBrokerReaderParams> indexToPushBrokerReaderParams = Maps.newHashMap(); private Map<Long, Integer> indexToSchemaHash = Maps.newHashMap(); private Map<Long, Map<Long, PushTask>> tabletToSentReplicaPushTask = Maps.newHashMap(); private Set<Long> finishedReplicas = Sets.newHashSet(); private Set<Long> quorumTablets = Sets.newHashSet(); private Set<Long> fullTablets = Sets.newHashSet(); public SparkLoadJob() { super(EtlJobType.SPARK); } public SparkLoadJob(long dbId, String label, ResourceDesc resourceDesc, OriginStatement originStmt, UserIdentity userInfo) throws MetaNotFoundException { super(EtlJobType.SPARK, dbId, label, originStmt, userInfo); this.resourceDesc = resourceDesc; } @Override public void setJobProperties(Map<String, String> properties) throws DdlException { super.setJobProperties(properties); setResourceInfo(); } /** * merge system conf with load stmt * * @throws DdlException */ private void setResourceInfo() throws DdlException { if (resourceDesc == null) { return; } String resourceName = resourceDesc.getName(); Resource oriResource = Env.getCurrentEnv().getResourceMgr().getResource(resourceName); if (oriResource == null) { throw new DdlException("Resource does not exist. 
name: " + resourceName); } sparkResource = ((SparkResource) oriResource).getCopiedResource(); sparkResource.update(resourceDesc); Map<String, String> brokerProperties = sparkResource.getBrokerPropertiesWithoutPrefix(); brokerDesc = new BrokerDesc(sparkResource.getBroker(), brokerProperties); } @Override public void beginTxn() throws LabelAlreadyUsedException, BeginTransactionException, AnalysisException, DuplicatedRequestException, QuotaExceedException, MetaNotFoundException { transactionId = Env.getCurrentGlobalTransactionMgr() .beginTransaction(dbId, Lists.newArrayList(fileGroupAggInfo.getAllTableIds()), label, null, new TxnCoordinator(TxnSourceType.FE, FrontendOptions.getLocalHostAddress()), LoadJobSourceType.FRONTEND, id, getTimeout()); } @Override protected void unprotectedExecuteJob() throws LoadException { try { beginTxn(); } catch (UserException e) { LOG.warn("failed to begin transaction for spark load job {}", id, e); throw new LoadException(e.getMessage()); } LoadTask task = new SparkLoadPendingTask(this, fileGroupAggInfo.getAggKeyToFileGroups(), sparkResource, brokerDesc); task.init(); idToTasks.put(task.getSignature(), task); Env.getCurrentEnv().getPendingLoadTaskScheduler().submit(task); } @Override public void onTaskFinished(TaskAttachment attachment) { if (attachment instanceof SparkPendingTaskAttachment) { onPendingTaskFinished((SparkPendingTaskAttachment) attachment); } } private void onPendingTaskFinished(SparkPendingTaskAttachment attachment) { writeLock(); try { if (isTxnDone()) { LOG.warn(new LogBuilder(LogKey.LOAD_JOB, id).add("state", state) .add("error_msg", "this task will be ignored when job is: " + state).build()); return; } if (finishedTaskIds.contains(attachment.getTaskId())) { LOG.warn(new LogBuilder(LogKey.LOAD_JOB, id).add("task_id", attachment.getTaskId()).add("error_msg", "this is a duplicated callback of pending task " + "when broker already has loading task") .build()); return; } finishedTaskIds.add(attachment.getTaskId()); sparkLoadAppHandle = attachment.getHandle(); appId = attachment.getAppId(); etlOutputPath = attachment.getOutputPath(); executeEtl(); unprotectedLogUpdateStateInfo(); } finally { writeUnlock(); } } /** * update etl start time and state in spark load job */ private void executeEtl() { etlStartTimestamp = System.currentTimeMillis(); state = JobState.ETL; LOG.info("update to {} state success. job id: {}", state, id); } private boolean checkState(JobState expectState) { readLock(); try { if (state == expectState) { return true; } return false; } finally { readUnlock(); } } /** * Check the status of etl job regularly * 1. RUNNING, update etl job progress * 2. CANCELLED, cancel load job * 3. FINISHED, get the etl output file paths, update job state to LOADING and log job update info * <p> * Send push tasks if job state changed to LOADING */ public void updateEtlStatus() throws Exception { if (!checkState(JobState.ETL)) { return; } SparkEtlJobHandler handler = new SparkEtlJobHandler(); EtlStatus status = handler.getEtlJobStatus(sparkLoadAppHandle, appId, id, etlOutputPath, sparkResource, brokerDesc); writeLock(); try { switch (status.getState()) { case RUNNING: unprotectedUpdateEtlStatusInternal(status); break; case FINISHED: unprotectedProcessEtlFinish(status, handler); break; case CANCELLED: throw new LoadException("spark etl job failed. 
msg: " + status.getFailMsg()); default: LOG.warn("unknown etl state: {}", status.getState().name()); break; } } finally { writeUnlock(); } if (checkState(JobState.LOADING)) { submitPushTasks(); } } private void unprotectedUpdateEtlStatusInternal(EtlStatus etlStatus) { loadingStatus = etlStatus; progress = etlStatus.getProgress(); if (!sparkResource.isYarnMaster()) { loadingStatus.setTrackingUrl(appId); } DppResult dppResult = etlStatus.getDppResult(); if (dppResult != null) { loadStatistic.fileNum = (int) dppResult.fileNumber; loadStatistic.totalFileSizeB = dppResult.fileSize; TUniqueId dummyId = new TUniqueId(0, 0); long dummyBackendId = -1L; loadStatistic.initLoad(dummyId, Sets.newHashSet(dummyId), Lists.newArrayList(dummyBackendId)); loadStatistic.updateLoadProgress(dummyBackendId, dummyId, dummyId, dppResult.scannedRows, dppResult.scannedBytes, true); Map<String, String> counters = loadingStatus.getCounters(); counters.put(DPP_NORMAL_ALL, String.valueOf(dppResult.normalRows)); counters.put(DPP_ABNORMAL_ALL, String.valueOf(dppResult.abnormalRows)); counters.put(UNSELECTED_ROWS, String.valueOf(dppResult.unselectRows)); } } private void unprotectedProcessEtlFinish(EtlStatus etlStatus, SparkEtlJobHandler handler) throws Exception { unprotectedUpdateEtlStatusInternal(etlStatus); if (!checkDataQuality()) { throw new DataQualityException(DataQualityException.QUALITY_FAIL_MSG); } unprotectedUpdateToLoadingState(etlStatus, handler.getEtlFilePaths(etlOutputPath, brokerDesc)); unprotectedLogUpdateStateInfo(); unprotectedPrepareLoadingInfos(); } private void unprotectedUpdateToLoadingState(EtlStatus etlStatus, Map<String, Long> filePathToSize) throws LoadException { try { for (Map.Entry<String, Long> entry : filePathToSize.entrySet()) { String filePath = entry.getKey(); if (!filePath.endsWith(EtlJobConfig.ETL_OUTPUT_FILE_FORMAT)) { continue; } String tabletMetaStr = EtlJobConfig.getTabletMetaStr(filePath); tabletMetaToFileInfo.put(tabletMetaStr, Pair.of(filePath, entry.getValue())); } loadingStatus = etlStatus; progress = 0; unprotectedUpdateState(JobState.LOADING); LOG.info("update to {} state success. job id: {}", state, id); } catch (Exception e) { LOG.warn("update to {} state failed. 
job id: {}", state, id, e); throw new LoadException(e.getMessage(), e); } } private void unprotectedPrepareLoadingInfos() { for (String tabletMetaStr : tabletMetaToFileInfo.keySet()) { String[] fileNameArr = tabletMetaStr.split("\\."); Preconditions.checkState(fileNameArr.length == 5); long tableId = Long.parseLong(fileNameArr[0]); long partitionId = Long.parseLong(fileNameArr[1]); long indexId = Long.parseLong(fileNameArr[2]); int schemaHash = Integer.parseInt(fileNameArr[4]); if (!tableToLoadPartitions.containsKey(tableId)) { tableToLoadPartitions.put(tableId, Sets.newHashSet()); } tableToLoadPartitions.get(tableId).add(partitionId); indexToSchemaHash.put(indexId, schemaHash); } } private PushBrokerReaderParams getPushBrokerReaderParams(OlapTable table, long indexId) throws UserException { if (!indexToPushBrokerReaderParams.containsKey(indexId)) { PushBrokerReaderParams pushBrokerReaderParams = new PushBrokerReaderParams(); pushBrokerReaderParams.init(table.getSchemaByIndexId(indexId), brokerDesc); indexToPushBrokerReaderParams.put(indexId, pushBrokerReaderParams); } return indexToPushBrokerReaderParams.get(indexId); } private Set<Long> submitPushTasks() throws UserException { Database db = null; try { db = getDb(); } catch (MetaNotFoundException e) { String errMsg = new LogBuilder(LogKey.LOAD_JOB, id).add("database_id", dbId).add("label", label) .add("error_msg", "db has been deleted when job is loading").build(); throw new MetaNotFoundException(errMsg); } AgentBatchTask batchTask = new AgentBatchTask(); boolean hasLoadPartitions = false; Set<Long> totalTablets = Sets.newHashSet(); List<? extends TableIf> tableList = db.getTablesOnIdOrderOrThrowException( Lists.newArrayList(tableToLoadPartitions.keySet())); MetaLockUtils.readLockTables(tableList); try { writeLock(); try { if (state != JobState.LOADING) { LOG.warn("job state is not loading. job id: {}, state: {}", id, state); return totalTablets; } for (TableIf table : tableList) { Set<Long> partitionIds = tableToLoadPartitions.get(table.getId()); OlapTable olapTable = (OlapTable) table; for (long partitionId : partitionIds) { Partition partition = olapTable.getPartition(partitionId); if (partition == null) { LOG.warn("partition does not exist. 
id: {}", partitionId); continue; } hasLoadPartitions = true; int quorumReplicaNum = olapTable.getPartitionInfo().getReplicaAllocation(partitionId).getTotalReplicaNum() / 2 + 1; List<MaterializedIndex> indexes = partition.getMaterializedIndices(IndexExtState.ALL); for (MaterializedIndex index : indexes) { long indexId = index.getId(); int schemaHash = indexToSchemaHash.get(indexId); List<TColumn> columnsDesc = new ArrayList<TColumn>(); for (Column column : olapTable.getSchemaByIndexId(indexId)) { columnsDesc.add(column.toThrift()); } int bucket = 0; for (Tablet tablet : index.getTablets()) { long tabletId = tablet.getId(); totalTablets.add(tabletId); String tabletMetaStr = String.format("%d.%d.%d.%d.%d", olapTable.getId(), partitionId, indexId, bucket++, schemaHash); Set<Long> tabletAllReplicas = Sets.newHashSet(); Set<Long> tabletFinishedReplicas = Sets.newHashSet(); for (Replica replica : tablet.getReplicas()) { long replicaId = replica.getId(); tabletAllReplicas.add(replicaId); if (!tabletToSentReplicaPushTask.containsKey(tabletId) || !tabletToSentReplicaPushTask.get(tabletId).containsKey(replicaId)) { long backendId = replica.getBackendId(); long taskSignature = Env.getCurrentGlobalTransactionMgr() .getTransactionIDGenerator().getNextTransactionId(); PushBrokerReaderParams params = getPushBrokerReaderParams(olapTable, indexId); TBrokerScanRange tBrokerScanRange = new TBrokerScanRange( params.tBrokerScanRange); TBrokerRangeDesc tBrokerRangeDesc = tBrokerScanRange.getRanges().get(0); tBrokerRangeDesc.setPath(""); tBrokerRangeDesc.setFileSize(-1); if (tabletMetaToFileInfo.containsKey(tabletMetaStr)) { Pair<String, Long> fileInfo = tabletMetaToFileInfo.get(tabletMetaStr); tBrokerRangeDesc.setPath(fileInfo.first); tBrokerRangeDesc.setFileSize(fileInfo.second); } Backend backend = Env.getCurrentEnv().getCurrentSystemInfo() .getBackend(backendId); FsBroker fsBroker = Env.getCurrentEnv().getBrokerMgr().getBroker( brokerDesc.getName(), backend.getHost()); tBrokerScanRange.getBrokerAddresses().add( new TNetworkAddress(fsBroker.ip, fsBroker.port)); LOG.debug("push task for replica {}, broker {}:{}," + " backendId {}, filePath {}, fileSize {}", replicaId, fsBroker.ip, fsBroker.port, backendId, tBrokerRangeDesc.path, tBrokerRangeDesc.file_size); PushTask pushTask = new PushTask(backendId, dbId, olapTable.getId(), partitionId, indexId, tabletId, replicaId, schemaHash, 0, id, TPushType.LOAD_V2, TPriority.NORMAL, transactionId, taskSignature, tBrokerScanRange, params.tDescriptorTable, columnsDesc); if (AgentTaskQueue.addTask(pushTask)) { batchTask.addTask(pushTask); if (!tabletToSentReplicaPushTask.containsKey(tabletId)) { tabletToSentReplicaPushTask.put(tabletId, Maps.newHashMap()); } tabletToSentReplicaPushTask.get(tabletId).put(replicaId, pushTask); } } if (finishedReplicas.contains(replicaId) && replica.getLastFailedVersion() < 0) { tabletFinishedReplicas.add(replicaId); } } if (tabletAllReplicas.size() == 0) { LOG.error("invalid situation. tablet is empty. 
id: {}", tabletId); } if (tabletFinishedReplicas.size() >= quorumReplicaNum) { quorumTablets.add(tabletId); if (tabletFinishedReplicas.size() == tabletAllReplicas.size()) { fullTablets.add(tabletId); } } } } } } if (batchTask.getTaskNum() > 0) { AgentTaskExecutor.submit(batchTask); } if (!hasLoadPartitions) { String errMsg = new LogBuilder(LogKey.LOAD_JOB, id).add("database_id", dbId).add("label", label) .add("error_msg", "all partitions have no load data").build(); throw new LoadException(errMsg); } return totalTablets; } finally { writeUnlock(); } } finally { MetaLockUtils.readUnlockTables(tableList); } } public void addFinishedReplica(long replicaId, long tabletId, long backendId) { writeLock(); try { if (finishedReplicas.add(replicaId)) { commitInfos.add(new TabletCommitInfo(tabletId, backendId)); Map<Long, PushTask> sentReplicaPushTask = tabletToSentReplicaPushTask.get(tabletId); if (sentReplicaPushTask != null) { if (sentReplicaPushTask.containsKey(replicaId)) { sentReplicaPushTask.put(replicaId, null); } } } } finally { writeUnlock(); } } /** * 1. Sends push tasks to Be * 2. Commit transaction after all push tasks execute successfully */ public void updateLoadingStatus() throws UserException { if (!checkState(JobState.LOADING)) { return; } Set<Long> totalTablets = submitPushTasks(); if (totalTablets.isEmpty()) { LOG.warn("total tablets set is empty. job id: {}, state: {}", id, state); return; } boolean canCommitJob = false; writeLock(); try { progress = fullTablets.size() * 100 / totalTablets.size(); if (progress == 100) { progress = 99; } if (quorumFinishTimestamp < 0 && quorumTablets.containsAll(totalTablets)) { quorumFinishTimestamp = System.currentTimeMillis(); } long stragglerTimeout = Config.load_straggler_wait_second * 1000; if ((quorumFinishTimestamp > 0 && System.currentTimeMillis() - quorumFinishTimestamp > stragglerTimeout) || fullTablets.containsAll(totalTablets)) { canCommitJob = true; } } finally { writeUnlock(); } if (canCommitJob) { tryCommitJob(); } } private void tryCommitJob() throws UserException { LOG.info(new LogBuilder(LogKey.LOAD_JOB, id).add("txn_id", transactionId) .add("msg", "Load job try to commit txn").build()); Database db = getDb(); List<Table> tableList = db.getTablesOnIdOrderOrThrowException( Lists.newArrayList(tableToLoadPartitions.keySet())); MetaLockUtils.writeLockTablesOrMetaException(tableList); try { Env.getCurrentGlobalTransactionMgr().commitTransaction( dbId, tableList, transactionId, commitInfos, new LoadJobFinalOperation(id, loadingStatus, progress, loadStartTimestamp, finishTimestamp, state, failMsg)); } catch (TabletQuorumFailedException e) { } finally { MetaLockUtils.writeUnlockTables(tableList); } } /** * load job already cancelled or finished, clear job below: * 1. kill etl job and delete etl files * 2. 
clear push tasks and infos that not persist */ @Override public void afterVisible(TransactionState txnState, boolean txnOperated) { super.afterVisible(txnState, txnOperated); clearJob(); } @Override public void afterAborted(TransactionState txnState, boolean txnOperated, String txnStatusChangeReason) throws UserException { super.afterAborted(txnState, txnOperated, txnStatusChangeReason); clearJob(); } @Override public void cancelJobWithoutCheck(FailMsg failMsg, boolean abortTxn, boolean needLog) { super.cancelJobWithoutCheck(failMsg, abortTxn, needLog); clearJob(); } @Override public void cancelJob(FailMsg failMsg) throws DdlException { super.cancelJob(failMsg); clearJob(); } @Override protected String getResourceName() { return sparkResource.getName(); } @Override protected long getEtlStartTimestamp() { return etlStartTimestamp; } public SparkLoadAppHandle getHandle() { return sparkLoadAppHandle; } public void clearSparkLauncherLog() { if (sparkLoadAppHandle != null) { String logPath = sparkLoadAppHandle.getLogPath(); if (!Strings.isNullOrEmpty(logPath)) { File file = new File(logPath); if (file.exists()) { file.delete(); } } } } @Override public void write(DataOutput out) throws IOException { super.write(out); sparkResource.write(out); sparkLoadAppHandle.write(out); out.writeLong(etlStartTimestamp); Text.writeString(out, appId); Text.writeString(out, etlOutputPath); out.writeInt(tabletMetaToFileInfo.size()); for (Map.Entry<String, Pair<String, Long>> entry : tabletMetaToFileInfo.entrySet()) { Text.writeString(out, entry.getKey()); Text.writeString(out, entry.getValue().first); out.writeLong(entry.getValue().second); } } public void readFields(DataInput in) throws IOException { super.readFields(in); sparkResource = (SparkResource) Resource.read(in); sparkLoadAppHandle = SparkLoadAppHandle.read(in); etlStartTimestamp = in.readLong(); appId = Text.readString(in); etlOutputPath = Text.readString(in); int size = in.readInt(); for (int i = 0; i < size; i++) { String tabletMetaStr = Text.readString(in); Pair<String, Long> fileInfo = Pair.of(Text.readString(in), in.readLong()); tabletMetaToFileInfo.put(tabletMetaStr, fileInfo); } } /** * log load job update info when job state changed to etl or loading */ private void unprotectedLogUpdateStateInfo() { SparkLoadJobStateUpdateInfo info = new SparkLoadJobStateUpdateInfo( id, state, transactionId, sparkLoadAppHandle, etlStartTimestamp, appId, etlOutputPath, loadStartTimestamp, tabletMetaToFileInfo); Env.getCurrentEnv().getEditLog().logUpdateLoadJob(info); } @Override public void replayUpdateStateInfo(LoadJobStateUpdateInfo info) { super.replayUpdateStateInfo(info); SparkLoadJobStateUpdateInfo sparkJobStateInfo = (SparkLoadJobStateUpdateInfo) info; sparkLoadAppHandle = sparkJobStateInfo.getSparkLoadAppHandle(); etlStartTimestamp = sparkJobStateInfo.getEtlStartTimestamp(); appId = sparkJobStateInfo.getAppId(); etlOutputPath = sparkJobStateInfo.getEtlOutputPath(); tabletMetaToFileInfo = sparkJobStateInfo.getTabletMetaToFileInfo(); switch (state) { case ETL: break; case LOADING: unprotectedPrepareLoadingInfos(); break; default: LOG.warn("replay update load job state info failed. error: wrong state. 
job id: {}, state: {}", id, state); break; } } /** * Used for spark load job journal log when job state changed to ETL or LOADING */ public static class SparkLoadJobStateUpdateInfo extends LoadJobStateUpdateInfo { @SerializedName(value = "sparkLoadAppHandle") private SparkLoadAppHandle sparkLoadAppHandle; @SerializedName(value = "etlStartTimestamp") private long etlStartTimestamp; @SerializedName(value = "appId") private String appId; @SerializedName(value = "etlOutputPath") private String etlOutputPath; @SerializedName(value = "tabletMetaToFileInfo") private Map<String, Pair<String, Long>> tabletMetaToFileInfo; public SparkLoadJobStateUpdateInfo(long jobId, JobState state, long transactionId, SparkLoadAppHandle sparkLoadAppHandle, long etlStartTimestamp, String appId, String etlOutputPath, long loadStartTimestamp, Map<String, Pair<String, Long>> tabletMetaToFileInfo) { super(jobId, state, transactionId, loadStartTimestamp); this.sparkLoadAppHandle = sparkLoadAppHandle; this.etlStartTimestamp = etlStartTimestamp; this.appId = appId; this.etlOutputPath = etlOutputPath; this.tabletMetaToFileInfo = tabletMetaToFileInfo; } public SparkLoadAppHandle getSparkLoadAppHandle() { return sparkLoadAppHandle; } public long getEtlStartTimestamp() { return etlStartTimestamp; } public String getAppId() { return appId; } public String getEtlOutputPath() { return etlOutputPath; } public Map<String, Pair<String, Long>> getTabletMetaToFileInfo() { return tabletMetaToFileInfo; } } /** * Params for be push broker reader * 1. TBrokerScanRange: file path and size, broker address, tranform expr * 2. TDescriptorTable: src and dest SlotDescriptors, src and dest tupleDescriptors * <p> * These params are sent to Be through push task */ private static class PushBrokerReaderParams { TBrokerScanRange tBrokerScanRange; TDescriptorTable tDescriptorTable; public PushBrokerReaderParams() { this.tBrokerScanRange = new TBrokerScanRange(); this.tDescriptorTable = null; } public void init(List<Column> columns, BrokerDesc brokerDesc) throws UserException { DescriptorTable descTable = new DescriptorTable(); TupleDescriptor destTupleDesc = descTable.createTupleDescriptor(); for (Column column : columns) { SlotDescriptor destSlotDesc = descTable.addSlotDescriptor(destTupleDesc); destSlotDesc.setIsMaterialized(true); destSlotDesc.setColumn(column); destSlotDesc.setIsNullable(column.isAllowNull()); } initTBrokerScanRange(descTable, destTupleDesc, columns, brokerDesc); initTDescriptorTable(descTable); } private void initTBrokerScanRange(DescriptorTable descTable, TupleDescriptor destTupleDesc, List<Column> columns, BrokerDesc brokerDesc) throws AnalysisException { TBrokerScanRangeParams params = new TBrokerScanRangeParams(); params.setStrictMode(false); params.setProperties(brokerDesc.getProperties()); TupleDescriptor srcTupleDesc = descTable.createTupleDescriptor(); Map<String, SlotDescriptor> srcSlotDescByName = Maps.newHashMap(); for (Column column : columns) { SlotDescriptor srcSlotDesc = descTable.addSlotDescriptor(srcTupleDesc); srcSlotDesc.setIsMaterialized(true); srcSlotDesc.setIsNullable(true); if (column.getDataType() == PrimitiveType.BITMAP) { srcSlotDesc.setType(ScalarType.createType(PrimitiveType.BITMAP)); srcSlotDesc.setColumn(new Column(column.getName(), PrimitiveType.BITMAP)); } else { srcSlotDesc.setType(ScalarType.createType(PrimitiveType.VARCHAR)); srcSlotDesc.setColumn(new Column(column.getName(), PrimitiveType.VARCHAR)); } params.addToSrcSlotIds(srcSlotDesc.getId().asInt()); srcSlotDescByName.put(column.getName(), 
srcSlotDesc); } Map<Integer, Integer> destSidToSrcSidWithoutTrans = Maps.newHashMap(); for (SlotDescriptor destSlotDesc : destTupleDesc.getSlots()) { if (!destSlotDesc.isMaterialized()) { continue; } SlotDescriptor srcSlotDesc = srcSlotDescByName.get(destSlotDesc.getColumn().getName()); destSidToSrcSidWithoutTrans.put(destSlotDesc.getId().asInt(), srcSlotDesc.getId().asInt()); Expr expr = new SlotRef(srcSlotDesc); expr = castToSlot(destSlotDesc, expr); params.putToExprOfDestSlot(destSlotDesc.getId().asInt(), expr.treeToThrift()); } params.setDestSidToSrcSidWithoutTrans(destSidToSrcSidWithoutTrans); params.setSrcTupleId(srcTupleDesc.getId().asInt()); params.setDestTupleId(destTupleDesc.getId().asInt()); tBrokerScanRange.setParams(params); tBrokerScanRange.setBrokerAddresses(Lists.newArrayList()); TBrokerRangeDesc tBrokerRangeDesc = new TBrokerRangeDesc(); tBrokerRangeDesc.setFileType(TFileType.FILE_BROKER); tBrokerRangeDesc.setFormatType(TFileFormatType.FORMAT_PARQUET); tBrokerRangeDesc.setSplittable(false); tBrokerRangeDesc.setStartOffset(0); tBrokerRangeDesc.setSize(-1); tBrokerScanRange.setRanges(Lists.newArrayList(tBrokerRangeDesc)); } private Expr castToSlot(SlotDescriptor slotDesc, Expr expr) throws AnalysisException { PrimitiveType dstType = slotDesc.getType().getPrimitiveType(); PrimitiveType srcType = expr.getType().getPrimitiveType(); if (dstType == PrimitiveType.BOOLEAN && srcType == PrimitiveType.VARCHAR) { return new CastExpr(Type.BOOLEAN, new CastExpr(Type.TINYINT, expr)); } if (dstType != srcType) { return expr.castTo(slotDesc.getType()); } return expr; } private void initTDescriptorTable(DescriptorTable descTable) { descTable.computeStatAndMemLayout(); tDescriptorTable = descTable.toThrift(); } } }
class SparkLoadJob extends BulkLoadJob { private static final Logger LOG = LogManager.getLogger(SparkLoadJob.class); private SparkResource sparkResource; private long etlStartTimestamp = -1; private String appId = ""; private String etlOutputPath = ""; private Map<String, Pair<String, Long>> tabletMetaToFileInfo = Maps.newHashMap(); private ResourceDesc resourceDesc; private SparkLoadAppHandle sparkLoadAppHandle = new SparkLoadAppHandle(); private long quorumFinishTimestamp = -1; private Map<Long, Set<Long>> tableToLoadPartitions = Maps.newHashMap(); private Map<Long, PushBrokerReaderParams> indexToPushBrokerReaderParams = Maps.newHashMap(); private Map<Long, Integer> indexToSchemaHash = Maps.newHashMap(); private Map<Long, Map<Long, PushTask>> tabletToSentReplicaPushTask = Maps.newHashMap(); private Set<Long> finishedReplicas = Sets.newHashSet(); private Set<Long> quorumTablets = Sets.newHashSet(); private Set<Long> fullTablets = Sets.newHashSet(); public SparkLoadJob() { super(EtlJobType.SPARK); } public SparkLoadJob(long dbId, String label, ResourceDesc resourceDesc, OriginStatement originStmt, UserIdentity userInfo) throws MetaNotFoundException { super(EtlJobType.SPARK, dbId, label, originStmt, userInfo); this.resourceDesc = resourceDesc; } @Override public void setJobProperties(Map<String, String> properties) throws DdlException { super.setJobProperties(properties); setResourceInfo(); } /** * merge system conf with load stmt * * @throws DdlException */ private void setResourceInfo() throws DdlException { if (resourceDesc == null) { return; } String resourceName = resourceDesc.getName(); Resource oriResource = Env.getCurrentEnv().getResourceMgr().getResource(resourceName); if (oriResource == null) { throw new DdlException("Resource does not exist. 
name: " + resourceName); } sparkResource = ((SparkResource) oriResource).getCopiedResource(); sparkResource.update(resourceDesc); Map<String, String> brokerProperties = sparkResource.getBrokerPropertiesWithoutPrefix(); brokerDesc = new BrokerDesc(sparkResource.getBroker(), brokerProperties); } @Override public void beginTxn() throws LabelAlreadyUsedException, BeginTransactionException, AnalysisException, DuplicatedRequestException, QuotaExceedException, MetaNotFoundException { transactionId = Env.getCurrentGlobalTransactionMgr() .beginTransaction(dbId, Lists.newArrayList(fileGroupAggInfo.getAllTableIds()), label, null, new TxnCoordinator(TxnSourceType.FE, FrontendOptions.getLocalHostAddress()), LoadJobSourceType.FRONTEND, id, getTimeout()); } @Override protected void unprotectedExecuteJob() throws LoadException { try { beginTxn(); } catch (UserException e) { LOG.warn("failed to begin transaction for spark load job {}", id, e); throw new LoadException(e.getMessage()); } LoadTask task = new SparkLoadPendingTask(this, fileGroupAggInfo.getAggKeyToFileGroups(), sparkResource, brokerDesc); task.init(); idToTasks.put(task.getSignature(), task); Env.getCurrentEnv().getPendingLoadTaskScheduler().submit(task); } @Override public void onTaskFinished(TaskAttachment attachment) { if (attachment instanceof SparkPendingTaskAttachment) { onPendingTaskFinished((SparkPendingTaskAttachment) attachment); } } private void onPendingTaskFinished(SparkPendingTaskAttachment attachment) { writeLock(); try { if (isTxnDone()) { LOG.warn(new LogBuilder(LogKey.LOAD_JOB, id).add("state", state) .add("error_msg", "this task will be ignored when job is: " + state).build()); return; } if (finishedTaskIds.contains(attachment.getTaskId())) { LOG.warn(new LogBuilder(LogKey.LOAD_JOB, id).add("task_id", attachment.getTaskId()).add("error_msg", "this is a duplicated callback of pending task " + "when broker already has loading task") .build()); return; } finishedTaskIds.add(attachment.getTaskId()); sparkLoadAppHandle = attachment.getHandle(); appId = attachment.getAppId(); etlOutputPath = attachment.getOutputPath(); executeEtl(); unprotectedLogUpdateStateInfo(); } finally { writeUnlock(); } } /** * update etl start time and state in spark load job */ private void executeEtl() { etlStartTimestamp = System.currentTimeMillis(); state = JobState.ETL; LOG.info("update to {} state success. job id: {}", state, id); } private boolean checkState(JobState expectState) { readLock(); try { if (state == expectState) { return true; } return false; } finally { readUnlock(); } } /** * Check the status of etl job regularly * 1. RUNNING, update etl job progress * 2. CANCELLED, cancel load job * 3. FINISHED, get the etl output file paths, update job state to LOADING and log job update info * <p> * Send push tasks if job state changed to LOADING */ public void updateEtlStatus() throws Exception { if (!checkState(JobState.ETL)) { return; } SparkEtlJobHandler handler = new SparkEtlJobHandler(); EtlStatus status = handler.getEtlJobStatus(sparkLoadAppHandle, appId, id, etlOutputPath, sparkResource, brokerDesc); writeLock(); try { switch (status.getState()) { case RUNNING: unprotectedUpdateEtlStatusInternal(status); break; case FINISHED: unprotectedProcessEtlFinish(status, handler); break; case CANCELLED: throw new LoadException("spark etl job failed. 
msg: " + status.getFailMsg()); default: LOG.warn("unknown etl state: {}", status.getState().name()); break; } } finally { writeUnlock(); } if (checkState(JobState.LOADING)) { submitPushTasks(); } } private void unprotectedUpdateEtlStatusInternal(EtlStatus etlStatus) { loadingStatus = etlStatus; progress = etlStatus.getProgress(); if (!sparkResource.isYarnMaster()) { loadingStatus.setTrackingUrl(appId); } DppResult dppResult = etlStatus.getDppResult(); if (dppResult != null) { loadStatistic.fileNum = (int) dppResult.fileNumber; loadStatistic.totalFileSizeB = dppResult.fileSize; TUniqueId dummyId = new TUniqueId(0, 0); long dummyBackendId = -1L; loadStatistic.initLoad(dummyId, Sets.newHashSet(dummyId), Lists.newArrayList(dummyBackendId)); loadStatistic.updateLoadProgress(dummyBackendId, dummyId, dummyId, dppResult.scannedRows, dppResult.scannedBytes, true); Map<String, String> counters = loadingStatus.getCounters(); counters.put(DPP_NORMAL_ALL, String.valueOf(dppResult.normalRows)); counters.put(DPP_ABNORMAL_ALL, String.valueOf(dppResult.abnormalRows)); counters.put(UNSELECTED_ROWS, String.valueOf(dppResult.unselectRows)); } } private void unprotectedProcessEtlFinish(EtlStatus etlStatus, SparkEtlJobHandler handler) throws Exception { unprotectedUpdateEtlStatusInternal(etlStatus); if (!checkDataQuality()) { throw new DataQualityException(DataQualityException.QUALITY_FAIL_MSG); } unprotectedUpdateToLoadingState(etlStatus, handler.getEtlFilePaths(etlOutputPath, brokerDesc)); unprotectedLogUpdateStateInfo(); unprotectedPrepareLoadingInfos(); } private void unprotectedUpdateToLoadingState(EtlStatus etlStatus, Map<String, Long> filePathToSize) throws LoadException { try { for (Map.Entry<String, Long> entry : filePathToSize.entrySet()) { String filePath = entry.getKey(); if (!filePath.endsWith(EtlJobConfig.ETL_OUTPUT_FILE_FORMAT)) { continue; } String tabletMetaStr = EtlJobConfig.getTabletMetaStr(filePath); tabletMetaToFileInfo.put(tabletMetaStr, Pair.of(filePath, entry.getValue())); } loadingStatus = etlStatus; progress = 0; unprotectedUpdateState(JobState.LOADING); LOG.info("update to {} state success. job id: {}", state, id); } catch (Exception e) { LOG.warn("update to {} state failed. 
job id: {}", state, id, e); throw new LoadException(e.getMessage(), e); } } private void unprotectedPrepareLoadingInfos() { for (String tabletMetaStr : tabletMetaToFileInfo.keySet()) { String[] fileNameArr = tabletMetaStr.split("\\."); Preconditions.checkState(fileNameArr.length == 5); long tableId = Long.parseLong(fileNameArr[0]); long partitionId = Long.parseLong(fileNameArr[1]); long indexId = Long.parseLong(fileNameArr[2]); int schemaHash = Integer.parseInt(fileNameArr[4]); if (!tableToLoadPartitions.containsKey(tableId)) { tableToLoadPartitions.put(tableId, Sets.newHashSet()); } tableToLoadPartitions.get(tableId).add(partitionId); indexToSchemaHash.put(indexId, schemaHash); } } private PushBrokerReaderParams getPushBrokerReaderParams(OlapTable table, long indexId) throws UserException { if (!indexToPushBrokerReaderParams.containsKey(indexId)) { PushBrokerReaderParams pushBrokerReaderParams = new PushBrokerReaderParams(); pushBrokerReaderParams.init(table.getSchemaByIndexId(indexId), brokerDesc); indexToPushBrokerReaderParams.put(indexId, pushBrokerReaderParams); } return indexToPushBrokerReaderParams.get(indexId); } private Set<Long> submitPushTasks() throws UserException { Database db = null; try { db = getDb(); } catch (MetaNotFoundException e) { String errMsg = new LogBuilder(LogKey.LOAD_JOB, id).add("database_id", dbId).add("label", label) .add("error_msg", "db has been deleted when job is loading").build(); throw new MetaNotFoundException(errMsg); } AgentBatchTask batchTask = new AgentBatchTask(); boolean hasLoadPartitions = false; Set<Long> totalTablets = Sets.newHashSet(); List<? extends TableIf> tableList = db.getTablesOnIdOrderOrThrowException( Lists.newArrayList(tableToLoadPartitions.keySet())); MetaLockUtils.readLockTables(tableList); try { writeLock(); try { if (state != JobState.LOADING) { LOG.warn("job state is not loading. job id: {}, state: {}", id, state); return totalTablets; } for (TableIf table : tableList) { Set<Long> partitionIds = tableToLoadPartitions.get(table.getId()); OlapTable olapTable = (OlapTable) table; for (long partitionId : partitionIds) { Partition partition = olapTable.getPartition(partitionId); if (partition == null) { LOG.warn("partition does not exist. 
id: {}", partitionId); continue; } hasLoadPartitions = true; int quorumReplicaNum = olapTable.getPartitionInfo().getReplicaAllocation(partitionId).getTotalReplicaNum() / 2 + 1; List<MaterializedIndex> indexes = partition.getMaterializedIndices(IndexExtState.ALL); for (MaterializedIndex index : indexes) { long indexId = index.getId(); int schemaHash = indexToSchemaHash.get(indexId); List<TColumn> columnsDesc = new ArrayList<TColumn>(); for (Column column : olapTable.getSchemaByIndexId(indexId)) { columnsDesc.add(column.toThrift()); } int bucket = 0; for (Tablet tablet : index.getTablets()) { long tabletId = tablet.getId(); totalTablets.add(tabletId); String tabletMetaStr = String.format("%d.%d.%d.%d.%d", olapTable.getId(), partitionId, indexId, bucket++, schemaHash); Set<Long> tabletAllReplicas = Sets.newHashSet(); Set<Long> tabletFinishedReplicas = Sets.newHashSet(); for (Replica replica : tablet.getReplicas()) { long replicaId = replica.getId(); tabletAllReplicas.add(replicaId); if (!tabletToSentReplicaPushTask.containsKey(tabletId) || !tabletToSentReplicaPushTask.get(tabletId).containsKey(replicaId)) { long backendId = replica.getBackendId(); long taskSignature = Env.getCurrentGlobalTransactionMgr() .getTransactionIDGenerator().getNextTransactionId(); PushBrokerReaderParams params = getPushBrokerReaderParams(olapTable, indexId); TBrokerScanRange tBrokerScanRange = new TBrokerScanRange( params.tBrokerScanRange); TBrokerRangeDesc tBrokerRangeDesc = tBrokerScanRange.getRanges().get(0); tBrokerRangeDesc.setPath(""); tBrokerRangeDesc.setFileSize(-1); if (tabletMetaToFileInfo.containsKey(tabletMetaStr)) { Pair<String, Long> fileInfo = tabletMetaToFileInfo.get(tabletMetaStr); tBrokerRangeDesc.setPath(fileInfo.first); tBrokerRangeDesc.setFileSize(fileInfo.second); } Backend backend = Env.getCurrentEnv().getCurrentSystemInfo() .getBackend(backendId); FsBroker fsBroker = Env.getCurrentEnv().getBrokerMgr().getBroker( brokerDesc.getName(), backend.getHost()); tBrokerScanRange.getBrokerAddresses().add( new TNetworkAddress(fsBroker.ip, fsBroker.port)); LOG.debug("push task for replica {}, broker {}:{}," + " backendId {}, filePath {}, fileSize {}", replicaId, fsBroker.ip, fsBroker.port, backendId, tBrokerRangeDesc.path, tBrokerRangeDesc.file_size); PushTask pushTask = new PushTask(backendId, dbId, olapTable.getId(), partitionId, indexId, tabletId, replicaId, schemaHash, 0, id, TPushType.LOAD_V2, TPriority.NORMAL, transactionId, taskSignature, tBrokerScanRange, params.tDescriptorTable, columnsDesc); if (AgentTaskQueue.addTask(pushTask)) { batchTask.addTask(pushTask); if (!tabletToSentReplicaPushTask.containsKey(tabletId)) { tabletToSentReplicaPushTask.put(tabletId, Maps.newHashMap()); } tabletToSentReplicaPushTask.get(tabletId).put(replicaId, pushTask); } } if (finishedReplicas.contains(replicaId) && replica.getLastFailedVersion() < 0) { tabletFinishedReplicas.add(replicaId); } } if (tabletAllReplicas.size() == 0) { LOG.error("invalid situation. tablet is empty. 
id: {}", tabletId); } if (tabletFinishedReplicas.size() >= quorumReplicaNum) { quorumTablets.add(tabletId); if (tabletFinishedReplicas.size() == tabletAllReplicas.size()) { fullTablets.add(tabletId); } } } } } } if (batchTask.getTaskNum() > 0) { AgentTaskExecutor.submit(batchTask); } if (!hasLoadPartitions) { String errMsg = new LogBuilder(LogKey.LOAD_JOB, id).add("database_id", dbId).add("label", label) .add("error_msg", "all partitions have no load data").build(); throw new LoadException(errMsg); } return totalTablets; } finally { writeUnlock(); } } finally { MetaLockUtils.readUnlockTables(tableList); } } public void addFinishedReplica(long replicaId, long tabletId, long backendId) { writeLock(); try { if (finishedReplicas.add(replicaId)) { commitInfos.add(new TabletCommitInfo(tabletId, backendId)); Map<Long, PushTask> sentReplicaPushTask = tabletToSentReplicaPushTask.get(tabletId); if (sentReplicaPushTask != null) { if (sentReplicaPushTask.containsKey(replicaId)) { sentReplicaPushTask.put(replicaId, null); } } } } finally { writeUnlock(); } } /** * 1. Sends push tasks to Be * 2. Commit transaction after all push tasks execute successfully */ public void updateLoadingStatus() throws UserException { if (!checkState(JobState.LOADING)) { return; } Set<Long> totalTablets = submitPushTasks(); if (totalTablets.isEmpty()) { LOG.warn("total tablets set is empty. job id: {}, state: {}", id, state); return; } boolean canCommitJob = false; writeLock(); try { progress = fullTablets.size() * 100 / totalTablets.size(); if (progress == 100) { progress = 99; } if (quorumFinishTimestamp < 0 && quorumTablets.containsAll(totalTablets)) { quorumFinishTimestamp = System.currentTimeMillis(); } long stragglerTimeout = Config.load_straggler_wait_second * 1000; if ((quorumFinishTimestamp > 0 && System.currentTimeMillis() - quorumFinishTimestamp > stragglerTimeout) || fullTablets.containsAll(totalTablets)) { canCommitJob = true; } } finally { writeUnlock(); } if (canCommitJob) { tryCommitJob(); } } private void tryCommitJob() throws UserException { LOG.info(new LogBuilder(LogKey.LOAD_JOB, id).add("txn_id", transactionId) .add("msg", "Load job try to commit txn").build()); Database db = getDb(); List<Table> tableList = db.getTablesOnIdOrderOrThrowException( Lists.newArrayList(tableToLoadPartitions.keySet())); MetaLockUtils.writeLockTablesOrMetaException(tableList); try { Env.getCurrentGlobalTransactionMgr().commitTransaction( dbId, tableList, transactionId, commitInfos, new LoadJobFinalOperation(id, loadingStatus, progress, loadStartTimestamp, finishTimestamp, state, failMsg)); } catch (TabletQuorumFailedException e) { } finally { MetaLockUtils.writeUnlockTables(tableList); } } /** * load job already cancelled or finished, clear job below: * 1. kill etl job and delete etl files * 2. 
clear push tasks and infos that not persist */ @Override public void afterVisible(TransactionState txnState, boolean txnOperated) { super.afterVisible(txnState, txnOperated); clearJob(); } @Override public void afterAborted(TransactionState txnState, boolean txnOperated, String txnStatusChangeReason) throws UserException { super.afterAborted(txnState, txnOperated, txnStatusChangeReason); clearJob(); } @Override public void cancelJobWithoutCheck(FailMsg failMsg, boolean abortTxn, boolean needLog) { super.cancelJobWithoutCheck(failMsg, abortTxn, needLog); clearJob(); } @Override public void cancelJob(FailMsg failMsg) throws DdlException { super.cancelJob(failMsg); clearJob(); } @Override protected String getResourceName() { return sparkResource.getName(); } @Override protected long getEtlStartTimestamp() { return etlStartTimestamp; } public SparkLoadAppHandle getHandle() { return sparkLoadAppHandle; } public void clearSparkLauncherLog() { if (sparkLoadAppHandle != null) { String logPath = sparkLoadAppHandle.getLogPath(); if (!Strings.isNullOrEmpty(logPath)) { File file = new File(logPath); if (file.exists()) { file.delete(); } } } } @Override public void write(DataOutput out) throws IOException { super.write(out); sparkResource.write(out); sparkLoadAppHandle.write(out); out.writeLong(etlStartTimestamp); Text.writeString(out, appId); Text.writeString(out, etlOutputPath); out.writeInt(tabletMetaToFileInfo.size()); for (Map.Entry<String, Pair<String, Long>> entry : tabletMetaToFileInfo.entrySet()) { Text.writeString(out, entry.getKey()); Text.writeString(out, entry.getValue().first); out.writeLong(entry.getValue().second); } } public void readFields(DataInput in) throws IOException { super.readFields(in); sparkResource = (SparkResource) Resource.read(in); sparkLoadAppHandle = SparkLoadAppHandle.read(in); etlStartTimestamp = in.readLong(); appId = Text.readString(in); etlOutputPath = Text.readString(in); int size = in.readInt(); for (int i = 0; i < size; i++) { String tabletMetaStr = Text.readString(in); Pair<String, Long> fileInfo = Pair.of(Text.readString(in), in.readLong()); tabletMetaToFileInfo.put(tabletMetaStr, fileInfo); } } /** * log load job update info when job state changed to etl or loading */ private void unprotectedLogUpdateStateInfo() { SparkLoadJobStateUpdateInfo info = new SparkLoadJobStateUpdateInfo( id, state, transactionId, sparkLoadAppHandle, etlStartTimestamp, appId, etlOutputPath, loadStartTimestamp, tabletMetaToFileInfo); Env.getCurrentEnv().getEditLog().logUpdateLoadJob(info); } @Override public void replayUpdateStateInfo(LoadJobStateUpdateInfo info) { super.replayUpdateStateInfo(info); SparkLoadJobStateUpdateInfo sparkJobStateInfo = (SparkLoadJobStateUpdateInfo) info; sparkLoadAppHandle = sparkJobStateInfo.getSparkLoadAppHandle(); etlStartTimestamp = sparkJobStateInfo.getEtlStartTimestamp(); appId = sparkJobStateInfo.getAppId(); etlOutputPath = sparkJobStateInfo.getEtlOutputPath(); tabletMetaToFileInfo = sparkJobStateInfo.getTabletMetaToFileInfo(); switch (state) { case ETL: break; case LOADING: unprotectedPrepareLoadingInfos(); break; default: LOG.warn("replay update load job state info failed. error: wrong state. 
job id: {}, state: {}", id, state); break; } } /** * Used for spark load job journal log when job state changed to ETL or LOADING */ public static class SparkLoadJobStateUpdateInfo extends LoadJobStateUpdateInfo { @SerializedName(value = "sparkLoadAppHandle") private SparkLoadAppHandle sparkLoadAppHandle; @SerializedName(value = "etlStartTimestamp") private long etlStartTimestamp; @SerializedName(value = "appId") private String appId; @SerializedName(value = "etlOutputPath") private String etlOutputPath; @SerializedName(value = "tabletMetaToFileInfo") private Map<String, Pair<String, Long>> tabletMetaToFileInfo; public SparkLoadJobStateUpdateInfo(long jobId, JobState state, long transactionId, SparkLoadAppHandle sparkLoadAppHandle, long etlStartTimestamp, String appId, String etlOutputPath, long loadStartTimestamp, Map<String, Pair<String, Long>> tabletMetaToFileInfo) { super(jobId, state, transactionId, loadStartTimestamp); this.sparkLoadAppHandle = sparkLoadAppHandle; this.etlStartTimestamp = etlStartTimestamp; this.appId = appId; this.etlOutputPath = etlOutputPath; this.tabletMetaToFileInfo = tabletMetaToFileInfo; } public SparkLoadAppHandle getSparkLoadAppHandle() { return sparkLoadAppHandle; } public long getEtlStartTimestamp() { return etlStartTimestamp; } public String getAppId() { return appId; } public String getEtlOutputPath() { return etlOutputPath; } public Map<String, Pair<String, Long>> getTabletMetaToFileInfo() { return tabletMetaToFileInfo; } } /** * Params for be push broker reader * 1. TBrokerScanRange: file path and size, broker address, tranform expr * 2. TDescriptorTable: src and dest SlotDescriptors, src and dest tupleDescriptors * <p> * These params are sent to Be through push task */ private static class PushBrokerReaderParams { TBrokerScanRange tBrokerScanRange; TDescriptorTable tDescriptorTable; public PushBrokerReaderParams() { this.tBrokerScanRange = new TBrokerScanRange(); this.tDescriptorTable = null; } public void init(List<Column> columns, BrokerDesc brokerDesc) throws UserException { DescriptorTable descTable = new DescriptorTable(); TupleDescriptor destTupleDesc = descTable.createTupleDescriptor(); for (Column column : columns) { SlotDescriptor destSlotDesc = descTable.addSlotDescriptor(destTupleDesc); destSlotDesc.setIsMaterialized(true); destSlotDesc.setColumn(column); destSlotDesc.setIsNullable(column.isAllowNull()); } initTBrokerScanRange(descTable, destTupleDesc, columns, brokerDesc); initTDescriptorTable(descTable); } private void initTBrokerScanRange(DescriptorTable descTable, TupleDescriptor destTupleDesc, List<Column> columns, BrokerDesc brokerDesc) throws AnalysisException { TBrokerScanRangeParams params = new TBrokerScanRangeParams(); params.setStrictMode(false); params.setProperties(brokerDesc.getProperties()); TupleDescriptor srcTupleDesc = descTable.createTupleDescriptor(); Map<String, SlotDescriptor> srcSlotDescByName = Maps.newHashMap(); for (Column column : columns) { SlotDescriptor srcSlotDesc = descTable.addSlotDescriptor(srcTupleDesc); srcSlotDesc.setIsMaterialized(true); srcSlotDesc.setIsNullable(true); if (column.getDataType() == PrimitiveType.BITMAP) { srcSlotDesc.setType(ScalarType.createType(PrimitiveType.BITMAP)); srcSlotDesc.setColumn(new Column(column.getName(), PrimitiveType.BITMAP)); } else { srcSlotDesc.setType(ScalarType.createType(PrimitiveType.VARCHAR)); srcSlotDesc.setColumn(new Column(column.getName(), PrimitiveType.VARCHAR)); } params.addToSrcSlotIds(srcSlotDesc.getId().asInt()); srcSlotDescByName.put(column.getName(), 
srcSlotDesc); } Map<Integer, Integer> destSidToSrcSidWithoutTrans = Maps.newHashMap(); for (SlotDescriptor destSlotDesc : destTupleDesc.getSlots()) { if (!destSlotDesc.isMaterialized()) { continue; } SlotDescriptor srcSlotDesc = srcSlotDescByName.get(destSlotDesc.getColumn().getName()); destSidToSrcSidWithoutTrans.put(destSlotDesc.getId().asInt(), srcSlotDesc.getId().asInt()); Expr expr = new SlotRef(srcSlotDesc); expr = castToSlot(destSlotDesc, expr); params.putToExprOfDestSlot(destSlotDesc.getId().asInt(), expr.treeToThrift()); } params.setDestSidToSrcSidWithoutTrans(destSidToSrcSidWithoutTrans); params.setSrcTupleId(srcTupleDesc.getId().asInt()); params.setDestTupleId(destTupleDesc.getId().asInt()); tBrokerScanRange.setParams(params); tBrokerScanRange.setBrokerAddresses(Lists.newArrayList()); TBrokerRangeDesc tBrokerRangeDesc = new TBrokerRangeDesc(); tBrokerRangeDesc.setFileType(TFileType.FILE_BROKER); tBrokerRangeDesc.setFormatType(TFileFormatType.FORMAT_PARQUET); tBrokerRangeDesc.setSplittable(false); tBrokerRangeDesc.setStartOffset(0); tBrokerRangeDesc.setSize(-1); tBrokerScanRange.setRanges(Lists.newArrayList(tBrokerRangeDesc)); } private Expr castToSlot(SlotDescriptor slotDesc, Expr expr) throws AnalysisException { PrimitiveType dstType = slotDesc.getType().getPrimitiveType(); PrimitiveType srcType = expr.getType().getPrimitiveType(); if (dstType == PrimitiveType.BOOLEAN && srcType == PrimitiveType.VARCHAR) { return new CastExpr(Type.BOOLEAN, new CastExpr(Type.TINYINT, expr)); } if (dstType != srcType) { return expr.castTo(slotDesc.getType()); } return expr; } private void initTDescriptorTable(DescriptorTable descTable) { descTable.computeStatAndMemLayout(); tDescriptorTable = descTable.toThrift(); } } }
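The row above turns on two small numeric rules: submitPushTasks() requires a majority quorum of replicas per tablet, and updateLoadingStatus() caps reported progress at 99 until the transaction commits. A minimal standalone sketch of just those two rules (the class name and sample values are hypothetical, not from the job code):

import java.util.Arrays;

// Illustrates the quorum and progress arithmetic used by SparkLoadJob above.
public class QuorumMathSketch {

    // Majority quorum as computed in submitPushTasks(): strictly more than half.
    static int quorumReplicaNum(int totalReplicaNum) {
        return totalReplicaNum / 2 + 1;
    }

    // Progress as computed in updateLoadingStatus(): never reports 100 before
    // the commit path actually finishes the job.
    static int progress(int fullTablets, int totalTablets) {
        int p = fullTablets * 100 / totalTablets;
        return p == 100 ? 99 : p;
    }

    public static void main(String[] args) {
        System.out.println(Arrays.asList(
                quorumReplicaNum(3),   // 2 of 3 replicas suffice
                quorumReplicaNum(5),   // 3 of 5
                progress(9, 10),       // 90
                progress(10, 10)));    // 99, not 100
    }
}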
Is this necessary, given that we'll call `FactoryUtil.validateFactoryOptions` and `FactoryUtil.validateUnconsumedKeys` later on?
public DynamicTableSource createDynamicTableSource(Context context) { createTableFactoryHelper(this, context).validateExcept(FIELDS); Configuration options = new Configuration(); context.getCatalogTable().getOptions().forEach(options::setString); TableSchema schema = TableSchemaUtils.getPhysicalSchema(context.getCatalogTable().getSchema()); DataGenerator[] fieldGenerators = new DataGenerator[schema.getFieldCount()]; Set<ConfigOption<?>> optionalOptions = new HashSet<>(); for (int i = 0; i < fieldGenerators.length; i++) { String name = schema.getFieldNames()[i]; DataType type = schema.getFieldDataTypes()[i]; ConfigOption<String> kind = key(FIELDS + "." + name + "." + KIND) .stringType().defaultValue(RANDOM); DataGeneratorContainer container = createContainer(name, type, options.get(kind), options); fieldGenerators[i] = container.generator; optionalOptions.add(kind); optionalOptions.addAll(container.options); } FactoryUtil.validateFactoryOptions(new HashSet<>(), optionalOptions, options); Set<String> consumedOptionKeys = new HashSet<>(); consumedOptionKeys.add(CONNECTOR.key()); consumedOptionKeys.add(ROWS_PER_SECOND.key()); optionalOptions.stream().map(ConfigOption::key).forEach(consumedOptionKeys::add); FactoryUtil.validateUnconsumedKeys(factoryIdentifier(), options.keySet(), consumedOptionKeys); return new DataGenTableSource(fieldGenerators, schema, options.get(ROWS_PER_SECOND)); }
createTableFactoryHelper(this, context).validateExcept(FIELDS);
public DynamicTableSource createDynamicTableSource(Context context) { Configuration options = new Configuration(); context.getCatalogTable().getOptions().forEach(options::setString); TableSchema schema = TableSchemaUtils.getPhysicalSchema(context.getCatalogTable().getSchema()); DataGenerator[] fieldGenerators = new DataGenerator[schema.getFieldCount()]; Set<ConfigOption<?>> optionalOptions = new HashSet<>(); for (int i = 0; i < fieldGenerators.length; i++) { String name = schema.getFieldNames()[i]; DataType type = schema.getFieldDataTypes()[i]; ConfigOption<String> kind = key(FIELDS + "." + name + "." + KIND) .stringType().defaultValue(RANDOM); DataGeneratorContainer container = createContainer(name, type, options.get(kind), options); fieldGenerators[i] = container.generator; optionalOptions.add(kind); optionalOptions.addAll(container.options); } FactoryUtil.validateFactoryOptions(requiredOptions(), optionalOptions, options); Set<String> consumedOptionKeys = new HashSet<>(); consumedOptionKeys.add(CONNECTOR.key()); consumedOptionKeys.add(ROWS_PER_SECOND.key()); optionalOptions.stream().map(ConfigOption::key).forEach(consumedOptionKeys::add); FactoryUtil.validateUnconsumedKeys(factoryIdentifier(), options.keySet(), consumedOptionKeys); return new DataGenTableSource(fieldGenerators, schema, options.get(ROWS_PER_SECOND)); }
class DataGenTableSourceFactory implements DynamicTableSourceFactory { public static final String IDENTIFIER = "datagen"; public static final Long ROWS_PER_SECOND_DEFAULT_VALUE = 10000L; public static final ConfigOption<Long> ROWS_PER_SECOND = key("rows-per-second") .longType() .defaultValue(ROWS_PER_SECOND_DEFAULT_VALUE) .withDescription("Rows per second to control the emit rate."); public static final String FIELDS = "fields"; public static final String KIND = "kind"; public static final String START = "start"; public static final String END = "end"; public static final String MIN = "min"; public static final String MAX = "max"; public static final String LENGTH = "length"; public static final String SEQUENCE = "sequence"; public static final String RANDOM = "random"; @Override public String factoryIdentifier() { return IDENTIFIER; } @Override public Set<ConfigOption<?>> requiredOptions() { return new HashSet<>(); } @Override public Set<ConfigOption<?>> optionalOptions() { Set<ConfigOption<?>> options = new HashSet<>(); options.add(ROWS_PER_SECOND); return options; } @Override private DataGeneratorContainer createContainer( String name, DataType type, String kind, ReadableConfig options) { switch (kind) { case RANDOM: return createRandomContainer(name, type, options); case SEQUENCE: return createSequenceContainer(name, type, options); default: throw new ValidationException("Unsupported generator kind: " + kind); } } private DataGeneratorContainer createRandomContainer(String name, DataType type, ReadableConfig config) { OptionBuilder minKey = key(FIELDS + "." + name + "." + MIN); OptionBuilder maxKey = key(FIELDS + "." + name + "." + MAX); switch (type.getLogicalType().getTypeRoot()) { case BOOLEAN: { return DataGeneratorContainer.of(RandomGenerator.booleanGenerator()); } case CHAR: case VARCHAR: { ConfigOption<Integer> lenOption = key(FIELDS + "." + name + "." 
+ LENGTH) .intType() .defaultValue(100); return DataGeneratorContainer.of(getRandomStringGenerator(config.get(lenOption)), lenOption); } case TINYINT: { ConfigOption<Integer> min = minKey.intType().defaultValue((int) Byte.MIN_VALUE); ConfigOption<Integer> max = maxKey.intType().defaultValue((int) Byte.MAX_VALUE); return DataGeneratorContainer.of( RandomGenerator.byteGenerator( config.get(min).byteValue(), config.get(max).byteValue()), min, max); } case SMALLINT: { ConfigOption<Integer> min = minKey.intType().defaultValue((int) Short.MIN_VALUE); ConfigOption<Integer> max = maxKey.intType().defaultValue((int) Short.MAX_VALUE); return DataGeneratorContainer.of( RandomGenerator.shortGenerator( config.get(min).shortValue(), config.get(max).shortValue()), min, max); } case INTEGER: { ConfigOption<Integer> min = minKey.intType().defaultValue(Integer.MIN_VALUE); ConfigOption<Integer> max = maxKey.intType().defaultValue(Integer.MAX_VALUE); return DataGeneratorContainer.of( RandomGenerator.intGenerator( config.get(min), config.get(max)), min, max); } case BIGINT: { ConfigOption<Long> min = minKey.longType().defaultValue(Long.MIN_VALUE); ConfigOption<Long> max = maxKey.longType().defaultValue(Long.MAX_VALUE); return DataGeneratorContainer.of( RandomGenerator.longGenerator( config.get(min), config.get(max)), min, max); } case FLOAT: { ConfigOption<Float> min = minKey.floatType().defaultValue(Float.MIN_VALUE); ConfigOption<Float> max = maxKey.floatType().defaultValue(Float.MAX_VALUE); return DataGeneratorContainer.of( RandomGenerator.floatGenerator( config.get(min), config.get(max)), min, max); } case DOUBLE: { ConfigOption<Double> min = minKey.doubleType().defaultValue(Double.MIN_VALUE); ConfigOption<Double> max = maxKey.doubleType().defaultValue(Double.MAX_VALUE); return DataGeneratorContainer.of( RandomGenerator.doubleGenerator( config.get(min), config.get(max)), min, max); } default: throw new ValidationException("Unsupported type: " + type); } } private DataGeneratorContainer createSequenceContainer(String name, DataType type, ReadableConfig config) { String startKeyStr = FIELDS + "." + name + "." + START; String endKeyStr = FIELDS + "." + name + "." 
+ END; OptionBuilder startKey = key(startKeyStr); OptionBuilder endKey = key(endKeyStr); config.getOptional(startKey.stringType().noDefaultValue()).orElseThrow( () -> new ValidationException( "Could not find required property '" + startKeyStr + "' for sequence generator.")); config.getOptional(endKey.stringType().noDefaultValue()).orElseThrow( () -> new ValidationException( "Could not find required property '" + endKeyStr + "' for sequence generator.")); ConfigOption<Integer> intStart = startKey.intType().noDefaultValue(); ConfigOption<Integer> intEnd = endKey.intType().noDefaultValue(); ConfigOption<Long> longStart = startKey.longType().noDefaultValue(); ConfigOption<Long> longEnd = endKey.longType().noDefaultValue(); switch (type.getLogicalType().getTypeRoot()) { case CHAR: case VARCHAR: return DataGeneratorContainer.of( getSequenceStringGenerator( config.get(longStart), config.get(longEnd)), longStart, longEnd); case TINYINT: return DataGeneratorContainer.of( SequenceGenerator.byteGenerator( config.get(intStart).byteValue(), config.get(intEnd).byteValue()), intStart, intEnd); case SMALLINT: return DataGeneratorContainer.of( SequenceGenerator.shortGenerator( config.get(intStart).shortValue(), config.get(intEnd).shortValue()), intStart, intEnd); case INTEGER: return DataGeneratorContainer.of( SequenceGenerator.intGenerator( config.get(intStart), config.get(intEnd)), intStart, intEnd); case BIGINT: return DataGeneratorContainer.of( SequenceGenerator.longGenerator( config.get(longStart), config.get(longEnd)), longStart, longEnd); case FLOAT: return DataGeneratorContainer.of( SequenceGenerator.floatGenerator( config.get(intStart).shortValue(), config.get(intEnd).shortValue()), intStart, intEnd); case DOUBLE: return DataGeneratorContainer.of( SequenceGenerator.doubleGenerator( config.get(intStart), config.get(intEnd)), intStart, intEnd); default: throw new ValidationException("Unsupported type: " + type); } } private static SequenceGenerator<StringData> getSequenceStringGenerator(long start, long end) { return new SequenceGenerator<StringData>(start, end) { @Override public StringData next() { return StringData.fromString(valuesToEmit.poll().toString()); } }; } private static RandomGenerator<StringData> getRandomStringGenerator(int length) { return new RandomGenerator<StringData>() { @Override public StringData next() { return StringData.fromString(random.nextHexString(length)); } }; } private static class DataGeneratorContainer { private DataGenerator generator; /** * Generator config options, for validation. */ private Set<ConfigOption<?>> options; private DataGeneratorContainer(DataGenerator generator, Set<ConfigOption<?>> options) { this.generator = generator; this.options = options; } private static DataGeneratorContainer of( DataGenerator generator, ConfigOption<?>... options) { return new DataGeneratorContainer(generator, new HashSet<>(Arrays.asList(options))); } } /** * A {@link StreamTableSource} that emits each number from a given interval exactly once, * possibly in parallel. See {@link StatefulSequenceSource}. 
*/ static class DataGenTableSource implements ScanTableSource { private final DataGenerator[] fieldGenerators; private final TableSchema schema; private final long rowsPerSecond; private DataGenTableSource(DataGenerator[] fieldGenerators, TableSchema schema, long rowsPerSecond) { this.fieldGenerators = fieldGenerators; this.schema = schema; this.rowsPerSecond = rowsPerSecond; } @Override public ScanRuntimeProvider getScanRuntimeProvider(ScanContext context) { return SourceFunctionProvider.of(createSource(), false); } @VisibleForTesting DataGeneratorSource<RowData> createSource() { return new DataGeneratorSource<>( new RowGenerator(fieldGenerators, schema.getFieldNames()), rowsPerSecond); } @Override public DynamicTableSource copy() { return new DataGenTableSource(fieldGenerators, schema, rowsPerSecond); } @Override public String asSummaryString() { return "DataGenTableSource"; } @Override public ChangelogMode getChangelogMode() { return ChangelogMode.insertOnly(); } } private static class RowGenerator implements DataGenerator<RowData> { private static final long serialVersionUID = 1L; private final DataGenerator[] fieldGenerators; private final String[] fieldNames; private RowGenerator(DataGenerator[] fieldGenerators, String[] fieldNames) { this.fieldGenerators = fieldGenerators; this.fieldNames = fieldNames; } @Override public void open( String name, FunctionInitializationContext context, RuntimeContext runtimeContext) throws Exception { for (int i = 0; i < fieldGenerators.length; i++) { fieldGenerators[i].open(fieldNames[i], context, runtimeContext); } } @Override public void snapshotState(FunctionSnapshotContext context) throws Exception { for (DataGenerator generator : fieldGenerators) { generator.snapshotState(context); } } @Override public boolean hasNext() { for (DataGenerator generator : fieldGenerators) { if (!generator.hasNext()) { return false; } } return true; } @Override public RowData next() { GenericRowData row = new GenericRowData(fieldNames.length); for (int i = 0; i < fieldGenerators.length; i++) { row.setField(i, fieldGenerators[i].next()); } return row; } } }
class DataGenTableSourceFactory implements DynamicTableSourceFactory { public static final String IDENTIFIER = "datagen"; public static final Long ROWS_PER_SECOND_DEFAULT_VALUE = 10000L; public static final int RANDOM_STRING_LENGTH_DEFAULT = 100; public static final ConfigOption<Long> ROWS_PER_SECOND = key("rows-per-second") .longType() .defaultValue(ROWS_PER_SECOND_DEFAULT_VALUE) .withDescription("Rows per second to control the emit rate."); public static final String FIELDS = "fields"; public static final String KIND = "kind"; public static final String START = "start"; public static final String END = "end"; public static final String MIN = "min"; public static final String MAX = "max"; public static final String LENGTH = "length"; public static final String SEQUENCE = "sequence"; public static final String RANDOM = "random"; @Override public String factoryIdentifier() { return IDENTIFIER; } @Override public Set<ConfigOption<?>> requiredOptions() { return new HashSet<>(); } @Override public Set<ConfigOption<?>> optionalOptions() { Set<ConfigOption<?>> options = new HashSet<>(); options.add(ROWS_PER_SECOND); return options; } @Override private DataGeneratorContainer createContainer( String name, DataType type, String kind, ReadableConfig options) { switch (kind) { case RANDOM: return createRandomContainer(name, type, options); case SEQUENCE: return createSequenceContainer(name, type, options); default: throw new ValidationException("Unsupported generator kind: " + kind); } } private DataGeneratorContainer createRandomContainer(String name, DataType type, ReadableConfig config) { OptionBuilder minKey = key(FIELDS + "." + name + "." + MIN); OptionBuilder maxKey = key(FIELDS + "." + name + "." + MAX); switch (type.getLogicalType().getTypeRoot()) { case BOOLEAN: { return DataGeneratorContainer.of(RandomGenerator.booleanGenerator()); } case CHAR: case VARCHAR: { ConfigOption<Integer> lenOption = key(FIELDS + "." + name + "." 
+ LENGTH) .intType() .defaultValue(RANDOM_STRING_LENGTH_DEFAULT); return DataGeneratorContainer.of(getRandomStringGenerator(config.get(lenOption)), lenOption); } case TINYINT: { ConfigOption<Integer> min = minKey.intType().defaultValue((int) Byte.MIN_VALUE); ConfigOption<Integer> max = maxKey.intType().defaultValue((int) Byte.MAX_VALUE); return DataGeneratorContainer.of( RandomGenerator.byteGenerator( config.get(min).byteValue(), config.get(max).byteValue()), min, max); } case SMALLINT: { ConfigOption<Integer> min = minKey.intType().defaultValue((int) Short.MIN_VALUE); ConfigOption<Integer> max = maxKey.intType().defaultValue((int) Short.MAX_VALUE); return DataGeneratorContainer.of( RandomGenerator.shortGenerator( config.get(min).shortValue(), config.get(max).shortValue()), min, max); } case INTEGER: { ConfigOption<Integer> min = minKey.intType().defaultValue(Integer.MIN_VALUE); ConfigOption<Integer> max = maxKey.intType().defaultValue(Integer.MAX_VALUE); return DataGeneratorContainer.of( RandomGenerator.intGenerator( config.get(min), config.get(max)), min, max); } case BIGINT: { ConfigOption<Long> min = minKey.longType().defaultValue(Long.MIN_VALUE); ConfigOption<Long> max = maxKey.longType().defaultValue(Long.MAX_VALUE); return DataGeneratorContainer.of( RandomGenerator.longGenerator( config.get(min), config.get(max)), min, max); } case FLOAT: { ConfigOption<Float> min = minKey.floatType().defaultValue(Float.MIN_VALUE); ConfigOption<Float> max = maxKey.floatType().defaultValue(Float.MAX_VALUE); return DataGeneratorContainer.of( RandomGenerator.floatGenerator( config.get(min), config.get(max)), min, max); } case DOUBLE: { ConfigOption<Double> min = minKey.doubleType().defaultValue(Double.MIN_VALUE); ConfigOption<Double> max = maxKey.doubleType().defaultValue(Double.MAX_VALUE); return DataGeneratorContainer.of( RandomGenerator.doubleGenerator( config.get(min), config.get(max)), min, max); } default: throw new ValidationException("Unsupported type: " + type); } } private DataGeneratorContainer createSequenceContainer(String name, DataType type, ReadableConfig config) { String startKeyStr = FIELDS + "." + name + "." + START; String endKeyStr = FIELDS + "." + name + "." 
+ END; OptionBuilder startKey = key(startKeyStr); OptionBuilder endKey = key(endKeyStr); config.getOptional(startKey.stringType().noDefaultValue()).orElseThrow( () -> new ValidationException( "Could not find required property '" + startKeyStr + "' for sequence generator.")); config.getOptional(endKey.stringType().noDefaultValue()).orElseThrow( () -> new ValidationException( "Could not find required property '" + endKeyStr + "' for sequence generator.")); ConfigOption<Integer> intStart = startKey.intType().noDefaultValue(); ConfigOption<Integer> intEnd = endKey.intType().noDefaultValue(); ConfigOption<Long> longStart = startKey.longType().noDefaultValue(); ConfigOption<Long> longEnd = endKey.longType().noDefaultValue(); switch (type.getLogicalType().getTypeRoot()) { case CHAR: case VARCHAR: return DataGeneratorContainer.of( getSequenceStringGenerator( config.get(longStart), config.get(longEnd)), longStart, longEnd); case TINYINT: return DataGeneratorContainer.of( SequenceGenerator.byteGenerator( config.get(intStart).byteValue(), config.get(intEnd).byteValue()), intStart, intEnd); case SMALLINT: return DataGeneratorContainer.of( SequenceGenerator.shortGenerator( config.get(intStart).shortValue(), config.get(intEnd).shortValue()), intStart, intEnd); case INTEGER: return DataGeneratorContainer.of( SequenceGenerator.intGenerator( config.get(intStart), config.get(intEnd)), intStart, intEnd); case BIGINT: return DataGeneratorContainer.of( SequenceGenerator.longGenerator( config.get(longStart), config.get(longEnd)), longStart, longEnd); case FLOAT: return DataGeneratorContainer.of( SequenceGenerator.floatGenerator( config.get(intStart).shortValue(), config.get(intEnd).shortValue()), intStart, intEnd); case DOUBLE: return DataGeneratorContainer.of( SequenceGenerator.doubleGenerator( config.get(intStart), config.get(intEnd)), intStart, intEnd); default: throw new ValidationException("Unsupported type: " + type); } } private static SequenceGenerator<StringData> getSequenceStringGenerator(long start, long end) { return new SequenceGenerator<StringData>(start, end) { @Override public StringData next() { return StringData.fromString(valuesToEmit.poll().toString()); } }; } private static RandomGenerator<StringData> getRandomStringGenerator(int length) { return new RandomGenerator<StringData>() { @Override public StringData next() { return StringData.fromString(random.nextHexString(length)); } }; } private static class DataGeneratorContainer { private DataGenerator generator; /** * Generator config options, for validation. */ private Set<ConfigOption<?>> options; private DataGeneratorContainer(DataGenerator generator, Set<ConfigOption<?>> options) { this.generator = generator; this.options = options; } private static DataGeneratorContainer of( DataGenerator generator, ConfigOption<?>... options) { return new DataGeneratorContainer(generator, new HashSet<>(Arrays.asList(options))); } } /** * A {@link StreamTableSource} that emits each number from a given interval exactly once, * possibly in parallel. See {@link StatefulSequenceSource}. 
*/ static class DataGenTableSource implements ScanTableSource { private final DataGenerator[] fieldGenerators; private final TableSchema schema; private final long rowsPerSecond; private DataGenTableSource(DataGenerator[] fieldGenerators, TableSchema schema, long rowsPerSecond) { this.fieldGenerators = fieldGenerators; this.schema = schema; this.rowsPerSecond = rowsPerSecond; } @Override public ScanRuntimeProvider getScanRuntimeProvider(ScanContext context) { return SourceFunctionProvider.of(createSource(), false); } @VisibleForTesting DataGeneratorSource<RowData> createSource() { return new DataGeneratorSource<>( new RowGenerator(fieldGenerators, schema.getFieldNames()), rowsPerSecond); } @Override public DynamicTableSource copy() { return new DataGenTableSource(fieldGenerators, schema, rowsPerSecond); } @Override public String asSummaryString() { return "DataGenTableSource"; } @Override public ChangelogMode getChangelogMode() { return ChangelogMode.insertOnly(); } } private static class RowGenerator implements DataGenerator<RowData> { private static final long serialVersionUID = 1L; private final DataGenerator[] fieldGenerators; private final String[] fieldNames; private RowGenerator(DataGenerator[] fieldGenerators, String[] fieldNames) { this.fieldGenerators = fieldGenerators; this.fieldNames = fieldNames; } @Override public void open( String name, FunctionInitializationContext context, RuntimeContext runtimeContext) throws Exception { for (int i = 0; i < fieldGenerators.length; i++) { fieldGenerators[i].open(fieldNames[i], context, runtimeContext); } } @Override public void snapshotState(FunctionSnapshotContext context) throws Exception { for (DataGenerator generator : fieldGenerators) { generator.snapshotState(context); } } @Override public boolean hasNext() { for (DataGenerator generator : fieldGenerators) { if (!generator.hasNext()) { return false; } } return true; } @Override public RowData next() { GenericRowData row = new GenericRowData(fieldNames.length); for (int i = 0; i < fieldGenerators.length; i++) { row.setField(i, fieldGenerators[i].next()); } return row; } } }
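For context on the record above: the option keys being built (`fields.<name>.kind`, `min`/`max`, `start`/`end`, `length`) correspond to the Flink SQL datagen connector. A minimal usage sketch, assuming a standard TableEnvironment; the table name and values are illustrative:

// Hedged usage sketch of the options validated above (Flink datagen connector).
// 'tableEnv' is a hypothetical TableEnvironment instance.
tableEnv.executeSql(
    "CREATE TABLE orders (id BIGINT, price DOUBLE) WITH (\n" +
    " 'connector' = 'datagen',\n" +
    " 'rows-per-second' = '10',\n" +
    " 'fields.id.kind' = 'sequence',\n" +
    " 'fields.id.start' = '1',\n" +
    " 'fields.id.end' = '1000',\n" +
    " 'fields.price.kind' = 'random',\n" +
    " 'fields.price.min' = '1.0',\n" +
    " 'fields.price.max' = '100.0'\n" +
    ")");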
nit: can use `hasSize`. This also applies to the lines below.
void testLazyInitialization() throws Exception { final int parallelism = 3; final int configuredMaxParallelism = 12; final ExecutionJobVertex ejv = createDynamicExecutionJobVertex(parallelism, configuredMaxParallelism, -1); Assertions.assertThat(ejv.getParallelism()).isEqualTo(parallelism); Assertions.assertThat(ejv.getMaxParallelism()).isEqualTo(configuredMaxParallelism); Assertions.assertThat(ejv.isInitialized()).isFalse(); Assertions.assertThat(ejv.getTaskVertices()).isEmpty(); Assertions.assertThatThrownBy(ejv::getInputs).isInstanceOf(IllegalStateException.class); Assertions.assertThatThrownBy(ejv::getProducedDataSets) .isInstanceOf(IllegalStateException.class); Assertions.assertThatThrownBy(ejv::getSplitAssigner) .isInstanceOf(IllegalStateException.class); Assertions.assertThatThrownBy(ejv::getOperatorCoordinators) .isInstanceOf(IllegalStateException.class); Assertions.assertThatThrownBy(() -> ejv.connectToPredecessors(Collections.emptyMap())) .isInstanceOf(IllegalStateException.class); Assertions.assertThatThrownBy(ejv::executionVertexFinished) .isInstanceOf(IllegalStateException.class); Assertions.assertThatThrownBy(ejv::executionVertexUnFinished) .isInstanceOf(IllegalStateException.class); initializeVertex(ejv); Assertions.assertThat(ejv.isInitialized()).isTrue(); Assertions.assertThat(ejv.getTaskVertices().length).isEqualTo(3); Assertions.assertThat(ejv.getInputs().size()).isEqualTo(0); Assertions.assertThat(ejv.getProducedDataSets().length).isEqualTo(1); Assertions.assertThat(ejv.getOperatorCoordinators().size()).isEqualTo(0); }
Assertions.assertThat(ejv.getTaskVertices().length).isEqualTo(3);
void testLazyInitialization() throws Exception { final int parallelism = 3; final int configuredMaxParallelism = 12; final ExecutionJobVertex ejv = createDynamicExecutionJobVertex(parallelism, configuredMaxParallelism, -1); assertThat(ejv.getParallelism()).isEqualTo(parallelism); assertThat(ejv.getMaxParallelism()).isEqualTo(configuredMaxParallelism); assertThat(ejv.isInitialized()).isFalse(); assertThat(ejv.getTaskVertices()).isEmpty(); assertThatThrownBy(ejv::getInputs).isInstanceOf(IllegalStateException.class); assertThatThrownBy(ejv::getProducedDataSets).isInstanceOf(IllegalStateException.class); assertThatThrownBy(ejv::getSplitAssigner).isInstanceOf(IllegalStateException.class); assertThatThrownBy(ejv::getOperatorCoordinators).isInstanceOf(IllegalStateException.class); assertThatThrownBy(() -> ejv.connectToPredecessors(Collections.emptyMap())) .isInstanceOf(IllegalStateException.class); assertThatThrownBy(ejv::executionVertexFinished).isInstanceOf(IllegalStateException.class); assertThatThrownBy(ejv::executionVertexUnFinished) .isInstanceOf(IllegalStateException.class); initializeVertex(ejv); assertThat(ejv.isInitialized()).isTrue(); assertThat(ejv.getTaskVertices()).hasSize(3); assertThat(ejv.getInputs()).isEmpty(); assertThat(ejv.getProducedDataSets()).hasSize(1); assertThat(ejv.getOperatorCoordinators()).isEmpty(); }
class ExecutionJobVertexTest { @RegisterExtension static final TestExecutorExtension<ScheduledExecutorService> EXECUTOR_RESOURCE = TestingUtils.defaultExecutorExtension(); @Test void testParallelismGreaterThanMaxParallelism() { JobVertex jobVertex = new JobVertex("testVertex"); jobVertex.setInvokableClass(AbstractInvokable.class); jobVertex.setParallelism(172); jobVertex.setMaxParallelism(4); Assertions.assertThatThrownBy( () -> ExecutionGraphTestUtils.getExecutionJobVertex(jobVertex)) .isInstanceOf(JobException.class) .hasMessageContaining("higher than the max parallelism"); } @Test @Test void testErrorIfInitializationWithoutParallelismDecided() throws Exception { final ExecutionJobVertex ejv = createDynamicExecutionJobVertex(); Assertions.assertThatThrownBy(() -> initializeVertex(ejv)) .isInstanceOf(IllegalStateException.class); } @Test void testSetParallelismLazily() throws Exception { final int parallelism = 3; final int defaultMaxParallelism = 13; final ExecutionJobVertex ejv = createDynamicExecutionJobVertex(-1, -1, defaultMaxParallelism); Assertions.assertThat(ejv.isParallelismDecided()).isFalse(); ejv.setParallelism(parallelism); Assertions.assertThat(ejv.isParallelismDecided()).isTrue(); Assertions.assertThat(ejv.getParallelism()).isEqualTo(parallelism); initializeVertex(ejv); Assertions.assertThat(ejv.getTaskVertices().length).isEqualTo(parallelism); } @Test void testConfiguredMaxParallelismIsRespected() throws Exception { final int configuredMaxParallelism = 12; final int defaultMaxParallelism = 13; final ExecutionJobVertex ejv = createDynamicExecutionJobVertex( -1, configuredMaxParallelism, defaultMaxParallelism); Assertions.assertThat(ejv.getMaxParallelism()).isEqualTo(configuredMaxParallelism); } @Test void testComputingMaxParallelismFromConfiguredParallelism() throws Exception { final int parallelism = 300; final int defaultMaxParallelism = 13; final ExecutionJobVertex ejv = createDynamicExecutionJobVertex(parallelism, -1, defaultMaxParallelism); Assertions.assertThat(ejv.getMaxParallelism()).isEqualTo(512); } @Test void testFallingBackToDefaultMaxParallelism() throws Exception { final int defaultMaxParallelism = 13; final ExecutionJobVertex ejv = createDynamicExecutionJobVertex(-1, -1, defaultMaxParallelism); Assertions.assertThat(ejv.getMaxParallelism()).isEqualTo(defaultMaxParallelism); } static void initializeVertex(ExecutionJobVertex vertex) throws Exception { vertex.initialize( 1, Time.milliseconds(1L), 1L, new DefaultSubtaskAttemptNumberStore(Collections.emptyList()), new CoordinatorStoreImpl()); } private static ExecutionJobVertex createDynamicExecutionJobVertex() throws Exception { return createDynamicExecutionJobVertex(-1, -1, 1); } public static ExecutionJobVertex createDynamicExecutionJobVertex( int parallelism, int maxParallelism, int defaultMaxParallelism) throws Exception { JobVertex jobVertex = new JobVertex("testVertex"); jobVertex.setInvokableClass(AbstractInvokable.class); jobVertex.getOrCreateResultDataSet( new IntermediateDataSetID(), ResultPartitionType.BLOCKING); if (maxParallelism > 0) { jobVertex.setMaxParallelism(maxParallelism); } if (parallelism > 0) { jobVertex.setParallelism(parallelism); } final DefaultExecutionGraph eg = TestingDefaultExecutionGraphBuilder.newBuilder() .build(EXECUTOR_RESOURCE.getExecutor()); final VertexParallelismStore vertexParallelismStore = AdaptiveBatchScheduler.computeVertexParallelismStoreForDynamicGraph( Collections.singletonList(jobVertex), defaultMaxParallelism); final VertexParallelismInformation 
vertexParallelismInfo = vertexParallelismStore.getParallelismInfo(jobVertex.getID()); return new ExecutionJobVertex(eg, jobVertex, vertexParallelismInfo); } }
class ExecutionJobVertexTest { @RegisterExtension static final TestExecutorExtension<ScheduledExecutorService> EXECUTOR_RESOURCE = TestingUtils.defaultExecutorExtension(); @Test void testParallelismGreaterThanMaxParallelism() { JobVertex jobVertex = new JobVertex("testVertex"); jobVertex.setInvokableClass(AbstractInvokable.class); jobVertex.setParallelism(172); jobVertex.setMaxParallelism(4); assertThatThrownBy(() -> ExecutionGraphTestUtils.getExecutionJobVertex(jobVertex)) .isInstanceOf(JobException.class) .hasMessageContaining("higher than the max parallelism"); } @Test @Test void testErrorIfInitializationWithoutParallelismDecided() throws Exception { final ExecutionJobVertex ejv = createDynamicExecutionJobVertex(); assertThatThrownBy(() -> initializeVertex(ejv)).isInstanceOf(IllegalStateException.class); } @Test void testSetParallelismLazily() throws Exception { final int parallelism = 3; final int defaultMaxParallelism = 13; final ExecutionJobVertex ejv = createDynamicExecutionJobVertex(-1, -1, defaultMaxParallelism); assertThat(ejv.isParallelismDecided()).isFalse(); ejv.setParallelism(parallelism); assertThat(ejv.isParallelismDecided()).isTrue(); assertThat(ejv.getParallelism()).isEqualTo(parallelism); initializeVertex(ejv); assertThat(ejv.getTaskVertices()).hasSize(parallelism); } @Test void testConfiguredMaxParallelismIsRespected() throws Exception { final int configuredMaxParallelism = 12; final int defaultMaxParallelism = 13; final ExecutionJobVertex ejv = createDynamicExecutionJobVertex( -1, configuredMaxParallelism, defaultMaxParallelism); assertThat(ejv.getMaxParallelism()).isEqualTo(configuredMaxParallelism); } @Test void testComputingMaxParallelismFromConfiguredParallelism() throws Exception { final int parallelism = 300; final int defaultMaxParallelism = 13; final ExecutionJobVertex ejv = createDynamicExecutionJobVertex(parallelism, -1, defaultMaxParallelism); assertThat(ejv.getMaxParallelism()).isEqualTo(512); } @Test void testFallingBackToDefaultMaxParallelism() throws Exception { final int defaultMaxParallelism = 13; final ExecutionJobVertex ejv = createDynamicExecutionJobVertex(-1, -1, defaultMaxParallelism); assertThat(ejv.getMaxParallelism()).isEqualTo(defaultMaxParallelism); } static void initializeVertex(ExecutionJobVertex vertex) throws Exception { vertex.initialize( 1, Time.milliseconds(1L), 1L, new DefaultSubtaskAttemptNumberStore(Collections.emptyList()), new CoordinatorStoreImpl()); } private static ExecutionJobVertex createDynamicExecutionJobVertex() throws Exception { return createDynamicExecutionJobVertex(-1, -1, 1); } public static ExecutionJobVertex createDynamicExecutionJobVertex( int parallelism, int maxParallelism, int defaultMaxParallelism) throws Exception { JobVertex jobVertex = new JobVertex("testVertex"); jobVertex.setInvokableClass(AbstractInvokable.class); jobVertex.getOrCreateResultDataSet( new IntermediateDataSetID(), ResultPartitionType.BLOCKING); if (maxParallelism > 0) { jobVertex.setMaxParallelism(maxParallelism); } if (parallelism > 0) { jobVertex.setParallelism(parallelism); } final DefaultExecutionGraph eg = TestingDefaultExecutionGraphBuilder.newBuilder() .build(EXECUTOR_RESOURCE.getExecutor()); final VertexParallelismStore vertexParallelismStore = AdaptiveBatchScheduler.computeVertexParallelismStoreForDynamicGraph( Collections.singletonList(jobVertex), defaultMaxParallelism); final VertexParallelismInformation vertexParallelismInfo = vertexParallelismStore.getParallelismInfo(jobVertex.getID()); return new ExecutionJobVertex(eg, 
jobVertex, vertexParallelismInfo); } }
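The record above maps length/size comparisons to AssertJ's dedicated assertions, which read better and also print the actual contents on failure. A minimal sketch, assuming standard AssertJ; the sample data is made up:

import static org.assertj.core.api.Assertions.assertThat;
import java.util.Collections;
import java.util.List;

class HasSizeSketch {
    void demo() {
        String[] vertices = {"v1", "v2", "v3"};
        List<String> inputs = Collections.emptyList();
        // Instead of: assertThat(vertices.length).isEqualTo(3);
        assertThat(vertices).hasSize(3);
        // Instead of: assertThat(inputs.size()).isEqualTo(0);
        assertThat(inputs).isEmpty();
    }
}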
`describe formatted tbl` => `DESCRIBE FORMATTED `TBL`` ?
public void testDescribeTable() { sql("describe tbl").ok("DESCRIBE `TBL`"); sql("describe extended tbl").ok("DESCRIBE EXTENDED `TBL`"); sql("describe formatted tbl").ok("DESCRIBE EXTENDED `TBL`"); }
}
public void testDescribeTable() { sql("describe tbl").ok("DESCRIBE `TBL`"); sql("describe extended tbl").ok("DESCRIBE EXTENDED `TBL`"); sql("describe formatted tbl").ok("DESCRIBE FORMATTED `TBL`"); }
class FlinkHiveSqlParserImplTest extends SqlParserTest { @Override protected SqlParserImplFactory parserImplFactory() { return FlinkHiveSqlParserImpl.FACTORY; } @Override public void testDescribeStatement() { } @Override public void testTableHintsInInsert() { } @Override public void testDescribeSchema() { } @Test public void testShowDatabases() { sql("show databases").ok("SHOW DATABASES"); } @Test public void testUseDatabase() { sql("use db1").ok("USE `DB1`"); } @Test public void testCreateDatabase() { sql("create database db1") .ok("CREATE DATABASE `DB1`"); sql("create database db1 comment 'comment db1' location '/path/to/db1'") .ok("CREATE DATABASE `DB1`\n" + "COMMENT 'comment db1'\n" + "LOCATION '/path/to/db1'"); sql("create database db1 with dbproperties ('k1'='v1','k2'='v2')") .ok("CREATE DATABASE `DB1` WITH DBPROPERTIES (\n" + " 'k1' = 'v1',\n" + " 'k2' = 'v2'\n" + ")"); } @Test public void testAlterDatabase() { sql("alter database db1 set dbproperties('k1'='v1')") .ok("ALTER DATABASE `DB1` SET DBPROPERTIES (\n" + " 'k1' = 'v1'\n" + ")"); sql("alter database db1 set location '/new/path'") .ok("ALTER DATABASE `DB1` SET LOCATION '/new/path'"); sql("alter database db1 set owner user user1") .ok("ALTER DATABASE `DB1` SET OWNER USER `USER1`"); sql("alter database db1 set owner role role1") .ok("ALTER DATABASE `DB1` SET OWNER ROLE `ROLE1`"); } @Test public void testDropDatabase() { sql("drop schema db1").ok("DROP DATABASE `DB1` RESTRICT"); sql("drop database db1 cascade").ok("DROP DATABASE `DB1` CASCADE"); } @Test public void testDescribeDatabase() { sql("describe schema db1").ok("DESCRIBE DATABASE `DB1`"); sql("describe database extended db1").ok("DESCRIBE DATABASE EXTENDED `DB1`"); } @Test public void testShowTables() { sql("show tables").ok("SHOW TABLES"); } @Test @Test public void testCreateTable() { sql("create table tbl (x int) row format delimited fields terminated by ',' escaped by '\\' " + "collection items terminated by ',' map keys terminated by ':' lines terminated by '\n' " + "null defined as 'null' location '/path/to/table'") .ok("CREATE TABLE `TBL` (\n" + " `X` INTEGER\n" + ")\n" + "ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' ESCAPED BY '\\' COLLECTION ITEMS TERMINATED BY ',' MAP KEYS TERMINATED BY ':' LINES TERMINATED BY '\n" + "' NULL DEFINED AS 'null'\n" + "LOCATION '/path/to/table'"); sql("create table tbl (x double) stored as orc tblproperties ('k1'='v1')") .ok("CREATE TABLE `TBL` (\n" + " `X` DOUBLE\n" + ")\n" + "STORED AS `ORC`\n" + "TBLPROPERTIES (\n" + " 'k1' = 'v1'\n" + ")"); sql("create table tbl (x decimal(5,2)) row format serde 'serde.class.name' with serdeproperties ('serde.k1'='v1')") .ok("CREATE TABLE `TBL` (\n" + " `X` DECIMAL(5, 2)\n" + ")\n" + "ROW FORMAT SERDE 'serde.class.name' WITH SERDEPROPERTIES (\n" + " 'serde.k1' = 'v1'\n" + ")"); sql("create table tbl (x date) row format delimited fields terminated by '\u0001' " + "stored as inputformat 'input.format.class' outputformat 'output.format.class'") .ok("CREATE TABLE `TBL` (\n" + " `X` DATE\n" + ")\n" + "ROW FORMAT DELIMITED FIELDS TERMINATED BY u&'\\0001'\n" + "STORED AS INPUTFORMAT 'input.format.class' OUTPUTFORMAT 'output.format.class'"); sql("create table tbl (x struct<f1:timestamp,f2:int>) partitioned by (p1 string,p2 bigint) stored as rcfile") .ok("CREATE TABLE `TBL` (\n" + " `X` ROW< `F1` TIMESTAMP(9), `F2` INTEGER >\n" + ")\n" + "PARTITIONED BY (\n" + " `P1` STRING,\n" + " `P2` BIGINT\n" + ")\n" + "STORED AS `RCFILE`"); sql("create external table tbl (x map<timestamp,array<timestamp>>) location 
'/table/path'") .ok("CREATE EXTERNAL TABLE `TBL` (\n" + " `X` MAP< TIMESTAMP(9), ARRAY< TIMESTAMP(9) > >\n" + ")\n" + "LOCATION '/table/path'"); sql("create temporary table tbl (x varchar(50)) partitioned by (p timestamp)") .ok("CREATE TEMPORARY TABLE `TBL` (\n" + " `X` VARCHAR(50)\n" + ")\n" + "PARTITIONED BY (\n" + " `P` TIMESTAMP(9)\n" + ")"); sql("create table tbl (v varchar)").fails("VARCHAR precision is mandatory"); } @Test public void testConstraints() { sql("create table tbl (x int not null enable rely, y string not null disable novalidate norely)") .ok("CREATE TABLE `TBL` (\n" + " `X` INTEGER NOT NULL ENABLE NOVALIDATE RELY,\n" + " `Y` STRING NOT NULL DISABLE NOVALIDATE NORELY\n" + ")"); sql("create table tbl (x int,y timestamp not null,z string,primary key (x,z) disable novalidate rely)") .ok("CREATE TABLE `TBL` (\n" + " `X` INTEGER,\n" + " `Y` TIMESTAMP(9) NOT NULL ENABLE NOVALIDATE RELY,\n" + " `Z` STRING,\n" + " PRIMARY KEY (`X`, `Z`) DISABLE NOVALIDATE RELY\n" + ")"); sql("create table tbl (x binary,y date,z string,constraint pk_cons primary key(x))") .ok("CREATE TABLE `TBL` (\n" + " `X` VARBINARY(2147483647),\n" + " `Y` DATE,\n" + " `Z` STRING,\n" + " CONSTRAINT `PK_CONS` PRIMARY KEY (`X`) ENABLE NOVALIDATE RELY\n" + ")"); } @Test public void testDropTable() { sql("drop table tbl").ok("DROP TABLE `TBL`"); sql("drop table if exists cat.tbl").ok("DROP TABLE IF EXISTS `CAT`.`TBL`"); } }
class FlinkHiveSqlParserImplTest extends SqlParserTest { @Override protected SqlParserImplFactory parserImplFactory() { return FlinkHiveSqlParserImpl.FACTORY; } @Override public void testDescribeStatement() { } @Override public void testTableHintsInInsert() { } @Override public void testDescribeSchema() { } @Test public void testShowDatabases() { sql("show databases").ok("SHOW DATABASES"); } @Test public void testUseDatabase() { sql("use db1").ok("USE `DB1`"); } @Test public void testCreateDatabase() { sql("create database db1") .ok("CREATE DATABASE `DB1`"); sql("create database db1 comment 'comment db1' location '/path/to/db1'") .ok("CREATE DATABASE `DB1`\n" + "COMMENT 'comment db1'\n" + "LOCATION '/path/to/db1'"); sql("create database db1 with dbproperties ('k1'='v1','k2'='v2')") .ok("CREATE DATABASE `DB1` WITH DBPROPERTIES (\n" + " 'k1' = 'v1',\n" + " 'k2' = 'v2'\n" + ")"); } @Test public void testAlterDatabase() { sql("alter database db1 set dbproperties('k1'='v1')") .ok("ALTER DATABASE `DB1` SET DBPROPERTIES (\n" + " 'k1' = 'v1'\n" + ")"); sql("alter database db1 set location '/new/path'") .ok("ALTER DATABASE `DB1` SET LOCATION '/new/path'"); sql("alter database db1 set owner user user1") .ok("ALTER DATABASE `DB1` SET OWNER USER `USER1`"); sql("alter database db1 set owner role role1") .ok("ALTER DATABASE `DB1` SET OWNER ROLE `ROLE1`"); } @Test public void testDropDatabase() { sql("drop schema db1").ok("DROP DATABASE `DB1` RESTRICT"); sql("drop database db1 cascade").ok("DROP DATABASE `DB1` CASCADE"); } @Test public void testDescribeDatabase() { sql("describe schema db1").ok("DESCRIBE DATABASE `DB1`"); sql("describe database extended db1").ok("DESCRIBE DATABASE EXTENDED `DB1`"); } @Test public void testShowTables() { sql("show tables").ok("SHOW TABLES"); } @Test @Test public void testCreateTable() { sql("create table tbl (x int) row format delimited fields terminated by ',' escaped by '\\' " + "collection items terminated by ',' map keys terminated by ':' lines terminated by '\n' " + "null defined as 'null' location '/path/to/table'") .ok("CREATE TABLE `TBL` (\n" + " `X` INTEGER\n" + ")\n" + "ROW FORMAT DELIMITED\n" + " FIELDS TERMINATED BY ',' ESCAPED BY '\\'\n" + " COLLECTION ITEMS TERMINATED BY ','\n" + " MAP KEYS TERMINATED BY ':'\n" + " LINES TERMINATED BY '\n'\n" + " NULL DEFINED AS 'null'\n" + "LOCATION '/path/to/table'"); sql("create table tbl (x double) stored as orc tblproperties ('k1'='v1')") .ok("CREATE TABLE `TBL` (\n" + " `X` DOUBLE\n" + ")\n" + "STORED AS `ORC`\n" + "TBLPROPERTIES (\n" + " 'k1' = 'v1'\n" + ")"); sql("create table tbl (x decimal(5,2)) row format serde 'serde.class.name' with serdeproperties ('serde.k1'='v1')") .ok("CREATE TABLE `TBL` (\n" + " `X` DECIMAL(5, 2)\n" + ")\n" + "ROW FORMAT SERDE 'serde.class.name' WITH SERDEPROPERTIES (\n" + " 'serde.k1' = 'v1'\n" + ")"); sql("create table tbl (x date) row format delimited fields terminated by '\u0001' " + "stored as inputformat 'input.format.class' outputformat 'output.format.class'") .ok("CREATE TABLE `TBL` (\n" + " `X` DATE\n" + ")\n" + "ROW FORMAT DELIMITED\n" + " FIELDS TERMINATED BY u&'\\0001'\n" + "STORED AS INPUTFORMAT 'input.format.class' OUTPUTFORMAT 'output.format.class'"); sql("create table tbl (x struct<f1:timestamp,f2:int>) partitioned by (p1 string,p2 bigint) stored as rcfile") .ok("CREATE TABLE `TBL` (\n" + " `X` STRUCT< `F1` TIMESTAMP, `F2` INTEGER >\n" + ")\n" + "PARTITIONED BY (\n" + " `P1` STRING,\n" + " `P2` BIGINT\n" + ")\n" + "STORED AS `RCFILE`"); sql("create external table tbl (x 
map<timestamp,array<timestamp>>) location '/table/path'") .ok("CREATE EXTERNAL TABLE `TBL` (\n" + " `X` MAP< TIMESTAMP, ARRAY< TIMESTAMP > >\n" + ")\n" + "LOCATION '/table/path'"); sql("create temporary table tbl (x varchar(50)) partitioned by (p timestamp)") .ok("CREATE TEMPORARY TABLE `TBL` (\n" + " `X` VARCHAR(50)\n" + ")\n" + "PARTITIONED BY (\n" + " `P` TIMESTAMP\n" + ")"); sql("create table tbl (v varchar)").fails("VARCHAR precision is mandatory"); } @Test public void testConstraints() { sql("create table tbl (x int not null enable rely, y string not null disable novalidate norely)") .ok("CREATE TABLE `TBL` (\n" + " `X` INTEGER NOT NULL ENABLE NOVALIDATE RELY,\n" + " `Y` STRING NOT NULL DISABLE NOVALIDATE NORELY\n" + ")"); sql("create table tbl (x int,y timestamp not null,z string,primary key (x,z) disable novalidate rely)") .ok("CREATE TABLE `TBL` (\n" + " `X` INTEGER,\n" + " `Y` TIMESTAMP NOT NULL ENABLE NOVALIDATE RELY,\n" + " `Z` STRING,\n" + " PRIMARY KEY (`X`, `Z`) DISABLE NOVALIDATE RELY\n" + ")"); sql("create table tbl (x binary,y date,z string,constraint pk_cons primary key(x))") .ok("CREATE TABLE `TBL` (\n" + " `X` BINARY,\n" + " `Y` DATE,\n" + " `Z` STRING,\n" + " CONSTRAINT `PK_CONS` PRIMARY KEY (`X`) ENABLE NOVALIDATE RELY\n" + ")"); } @Test public void testDropTable() { sql("drop table tbl").ok("DROP TABLE `TBL`"); sql("drop table if exists cat.tbl").ok("DROP TABLE IF EXISTS `CAT`.`TBL`"); } }
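The fix above keeps the user's keyword instead of normalizing FORMATTED to EXTENDED in the expected unparse output. The parser-side change is not shown in the record; a hedged sketch of an unparse method that would produce that output (the class shape and the `kind`/`tableName` fields are assumptions, not the actual Flink parser code):

// Hypothetical SqlNode for Hive-dialect DESCRIBE; 'kind' stores the keyword
// exactly as parsed (EXTENDED or FORMATTED) so unparse can round-trip it.
public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
    writer.keyword("DESCRIBE");
    if (kind != null) {
        writer.keyword(kind.name()); // emits FORMATTED rather than mapping it to EXTENDED
    }
    tableName.unparse(writer, leftPrec, rightPrec);
}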
we should call `view.unsynchronizedGetNumberOfQueuedBuffers` instead.
public int unsynchronizedGetNumberOfQueuedBuffers() { ResultSubpartitionView view = subpartitionView; if (view != null) { return subpartitionView.unsynchronizedGetNumberOfQueuedBuffers(); } return 0; }
return subpartitionView.unsynchronizedGetNumberOfQueuedBuffers();
public int unsynchronizedGetNumberOfQueuedBuffers() { ResultSubpartitionView view = subpartitionView; if (view != null) { return view.unsynchronizedGetNumberOfQueuedBuffers(); } return 0; }
class LocalInputChannel extends InputChannel implements BufferAvailabilityListener { private static final Logger LOG = LoggerFactory.getLogger(LocalInputChannel.class); private final Object requestLock = new Object(); /** The local partition manager. */ private final ResultPartitionManager partitionManager; /** Task event dispatcher for backwards events. */ private final TaskEventPublisher taskEventPublisher; /** The consumed subpartition. */ private volatile ResultSubpartitionView subpartitionView; private volatile boolean isReleased; public LocalInputChannel( SingleInputGate inputGate, int channelIndex, ResultPartitionID partitionId, ResultPartitionManager partitionManager, TaskEventPublisher taskEventPublisher, InputChannelMetrics metrics) { this(inputGate, channelIndex, partitionId, partitionManager, taskEventPublisher, 0, 0, metrics); } public LocalInputChannel( SingleInputGate inputGate, int channelIndex, ResultPartitionID partitionId, ResultPartitionManager partitionManager, TaskEventPublisher taskEventPublisher, int initialBackoff, int maxBackoff, InputChannelMetrics metrics) { super(inputGate, channelIndex, partitionId, initialBackoff, maxBackoff, metrics.getNumBytesInLocalCounter(), metrics.getNumBuffersInLocalCounter()); this.partitionManager = checkNotNull(partitionManager); this.taskEventPublisher = checkNotNull(taskEventPublisher); } @Override void requestSubpartition(int subpartitionIndex) throws IOException, InterruptedException { boolean retriggerRequest = false; synchronized (requestLock) { checkState(!isReleased, "LocalInputChannel has been released already"); if (subpartitionView == null) { LOG.debug("{}: Requesting LOCAL subpartition {} of partition {}.", this, subpartitionIndex, partitionId); try { ResultSubpartitionView subpartitionView = partitionManager.createSubpartitionView( partitionId, subpartitionIndex, this); if (subpartitionView == null) { throw new IOException("Error requesting subpartition."); } this.subpartitionView = subpartitionView; if (isReleased) { subpartitionView.releaseAllResources(); this.subpartitionView = null; } } catch (PartitionNotFoundException notFound) { if (increaseBackoff()) { retriggerRequest = true; } else { throw notFound; } } } } if (retriggerRequest) { inputGate.retriggerPartitionRequest(partitionId.getPartitionId()); } } /** * Retriggers a subpartition request. 
*/ void retriggerSubpartitionRequest(Timer timer, final int subpartitionIndex) { synchronized (requestLock) { checkState(subpartitionView == null, "already requested partition"); timer.schedule(new TimerTask() { @Override public void run() { try { requestSubpartition(subpartitionIndex); } catch (Throwable t) { setError(t); } } }, getCurrentBackoff()); } } @Override Optional<BufferAndAvailability> getNextBuffer() throws IOException, InterruptedException { checkError(); ResultSubpartitionView subpartitionView = this.subpartitionView; if (subpartitionView == null) { if (isReleased) { return Optional.empty(); } subpartitionView = checkAndWaitForSubpartitionView(); } BufferAndBacklog next = subpartitionView.getNextBuffer(); if (next == null) { if (subpartitionView.isReleased()) { throw new CancelTaskException("Consumed partition " + subpartitionView + " has been released."); } else { return Optional.empty(); } } numBytesIn.inc(next.buffer().getSize()); numBuffersIn.inc(); return Optional.of(new BufferAndAvailability(next.buffer(), next.isMoreAvailable(), next.buffersInBacklog())); } @Override public void notifyDataAvailable() { notifyChannelNonEmpty(); } private ResultSubpartitionView checkAndWaitForSubpartitionView() { synchronized (requestLock) { checkState(!isReleased, "released"); checkState(subpartitionView != null, "Queried for a buffer before requesting the subpartition."); return subpartitionView; } } @Override void sendTaskEvent(TaskEvent event) throws IOException { checkError(); checkState(subpartitionView != null, "Tried to send task event to producer before requesting the subpartition."); if (!taskEventPublisher.publish(partitionId, event)) { throw new IOException("Error while publishing event " + event + " to producer. The producer could not be found."); } } @Override boolean isReleased() { return isReleased; } @Override void notifySubpartitionConsumed() throws IOException { if (subpartitionView != null) { subpartitionView.notifySubpartitionConsumed(); } } /** * Releases the partition reader. */ @Override void releaseAllResources() throws IOException { if (!isReleased) { isReleased = true; ResultSubpartitionView view = subpartitionView; if (view != null) { view.releaseAllResources(); subpartitionView = null; } } } @Override @Override public String toString() { return "LocalInputChannel [" + partitionId + "]"; } }
class LocalInputChannel extends InputChannel implements BufferAvailabilityListener { private static final Logger LOG = LoggerFactory.getLogger(LocalInputChannel.class); private final Object requestLock = new Object(); /** The local partition manager. */ private final ResultPartitionManager partitionManager; /** Task event dispatcher for backwards events. */ private final TaskEventPublisher taskEventPublisher; /** The consumed subpartition. */ private volatile ResultSubpartitionView subpartitionView; private volatile boolean isReleased; public LocalInputChannel( SingleInputGate inputGate, int channelIndex, ResultPartitionID partitionId, ResultPartitionManager partitionManager, TaskEventPublisher taskEventPublisher, InputChannelMetrics metrics) { this(inputGate, channelIndex, partitionId, partitionManager, taskEventPublisher, 0, 0, metrics); } public LocalInputChannel( SingleInputGate inputGate, int channelIndex, ResultPartitionID partitionId, ResultPartitionManager partitionManager, TaskEventPublisher taskEventPublisher, int initialBackoff, int maxBackoff, InputChannelMetrics metrics) { super(inputGate, channelIndex, partitionId, initialBackoff, maxBackoff, metrics.getNumBytesInLocalCounter(), metrics.getNumBuffersInLocalCounter()); this.partitionManager = checkNotNull(partitionManager); this.taskEventPublisher = checkNotNull(taskEventPublisher); } @Override void requestSubpartition(int subpartitionIndex) throws IOException, InterruptedException { boolean retriggerRequest = false; synchronized (requestLock) { checkState(!isReleased, "LocalInputChannel has been released already"); if (subpartitionView == null) { LOG.debug("{}: Requesting LOCAL subpartition {} of partition {}.", this, subpartitionIndex, partitionId); try { ResultSubpartitionView subpartitionView = partitionManager.createSubpartitionView( partitionId, subpartitionIndex, this); if (subpartitionView == null) { throw new IOException("Error requesting subpartition."); } this.subpartitionView = subpartitionView; if (isReleased) { subpartitionView.releaseAllResources(); this.subpartitionView = null; } } catch (PartitionNotFoundException notFound) { if (increaseBackoff()) { retriggerRequest = true; } else { throw notFound; } } } } if (retriggerRequest) { inputGate.retriggerPartitionRequest(partitionId.getPartitionId()); } } /** * Retriggers a subpartition request. 
*/ void retriggerSubpartitionRequest(Timer timer, final int subpartitionIndex) { synchronized (requestLock) { checkState(subpartitionView == null, "already requested partition"); timer.schedule(new TimerTask() { @Override public void run() { try { requestSubpartition(subpartitionIndex); } catch (Throwable t) { setError(t); } } }, getCurrentBackoff()); } } @Override Optional<BufferAndAvailability> getNextBuffer() throws IOException, InterruptedException { checkError(); ResultSubpartitionView subpartitionView = this.subpartitionView; if (subpartitionView == null) { if (isReleased) { return Optional.empty(); } subpartitionView = checkAndWaitForSubpartitionView(); } BufferAndBacklog next = subpartitionView.getNextBuffer(); if (next == null) { if (subpartitionView.isReleased()) { throw new CancelTaskException("Consumed partition " + subpartitionView + " has been released."); } else { return Optional.empty(); } } numBytesIn.inc(next.buffer().getSize()); numBuffersIn.inc(); return Optional.of(new BufferAndAvailability(next.buffer(), next.isMoreAvailable(), next.buffersInBacklog())); } @Override public void notifyDataAvailable() { notifyChannelNonEmpty(); } private ResultSubpartitionView checkAndWaitForSubpartitionView() { synchronized (requestLock) { checkState(!isReleased, "released"); checkState(subpartitionView != null, "Queried for a buffer before requesting the subpartition."); return subpartitionView; } } @Override void sendTaskEvent(TaskEvent event) throws IOException { checkError(); checkState(subpartitionView != null, "Tried to send task event to producer before requesting the subpartition."); if (!taskEventPublisher.publish(partitionId, event)) { throw new IOException("Error while publishing event " + event + " to producer. The producer could not be found."); } } @Override boolean isReleased() { return isReleased; } @Override void notifySubpartitionConsumed() throws IOException { if (subpartitionView != null) { subpartitionView.notifySubpartitionConsumed(); } } /** * Releases the partition reader. */ @Override void releaseAllResources() throws IOException { if (!isReleased) { isReleased = true; ResultSubpartitionView view = subpartitionView; if (view != null) { view.releaseAllResources(); subpartitionView = null; } } } @Override @Override public String toString() { return "LocalInputChannel [" + partitionId + "]"; } }
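The comment above flags a read-once bug: the method copies the volatile `subpartitionView` into the local `view` precisely so the null check and the call observe the same snapshot, but then calls through the field again. A minimal illustration of the hazard, with illustrative names rather than Flink's:

class VolatileSnapshotSketch {
    private volatile Runnable view; // may be cleared concurrently, e.g. on release

    void unsafe() {
        if (view != null) {
            view.run(); // second read of the field; may NPE if cleared in between
        }
    }

    void safe() {
        Runnable local = view; // one volatile read, stable snapshot
        if (local != null) {
            local.run();
        }
    }
}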
That's a very good point and I have to admit that I need to check the code and tests...
public FieldInjector(Field field, Object testInstance) throws Exception { this.field = field; ArcContainer container = Arc.container(); BeanManager beanManager = container.beanManager(); java.lang.reflect.Type requiredType = field.getGenericType(); Annotation[] qualifiers = getQualifiers(field, beanManager); Object injectedInstance; if (qualifiers.length == 1 && qualifiers[0].equals(All.Literal.INSTANCE)) { if (isListRequiredType(requiredType)) { List<InstanceHandle<Object>> handles = container.listAll(requiredType, qualifiers); if (isTypeArgumentInstanceHandle(requiredType)) { injectedInstance = handles; } else { injectedInstance = handles.stream().map(InstanceHandle::get).collect(Collectors.toUnmodifiableList()); } unsetHandles = cast(handles); } else { throw new IllegalStateException("Invalid injection point type: " + field); } } else { InstanceHandle<?> handle = container.instance(requiredType, qualifiers); if (field.isAnnotationPresent(Inject.class)) { if (handle.getBean().getKind() == io.quarkus.arc.InjectableBean.Kind.SYNTHETIC) { throw new IllegalStateException(String .format("The injected field %s expects a real component; but obtained: %s", field, handle.getBean())); } } else { if (handle.getBean().getKind() != io.quarkus.arc.InjectableBean.Kind.SYNTHETIC) { throw new IllegalStateException(String .format("The injected field %s expects a mocked bean; but obtained: %s", field, handle.getBean())); } } injectedInstance = handle.get(); unsetHandles = List.of(handle); } field.setAccessible(true); field.set(testInstance, injectedInstance); }
if (isListRequiredType(requiredType)) {
public FieldInjector(Field field, Object testInstance) throws Exception { this.field = field; ArcContainer container = Arc.container(); BeanManager beanManager = container.beanManager(); java.lang.reflect.Type requiredType = field.getGenericType(); Annotation[] qualifiers = getQualifiers(field, beanManager); Object injectedInstance; if (qualifiers.length > 0 && Arrays.stream(qualifiers).anyMatch(All.Literal.INSTANCE::equals)) { if (isListRequiredType(requiredType)) { List<InstanceHandle<Object>> handles = container.listAll(requiredType, qualifiers); if (isTypeArgumentInstanceHandle(requiredType)) { injectedInstance = handles; } else { injectedInstance = handles.stream().map(InstanceHandle::get).collect(Collectors.toUnmodifiableList()); } unsetHandles = cast(handles); } else { throw new IllegalStateException("Invalid injection point type: " + field); } } else { InstanceHandle<?> handle = container.instance(requiredType, qualifiers); if (field.isAnnotationPresent(Inject.class)) { if (handle.getBean().getKind() == io.quarkus.arc.InjectableBean.Kind.SYNTHETIC) { throw new IllegalStateException(String .format("The injected field %s expects a real component; but obtained: %s", field, handle.getBean())); } } else { if (handle.getBean().getKind() != io.quarkus.arc.InjectableBean.Kind.SYNTHETIC) { throw new IllegalStateException(String .format("The injected field %s expects a mocked bean; but obtained: %s", field, handle.getBean())); } } injectedInstance = handle.get(); unsetHandles = List.of(handle); } field.setAccessible(true); field.set(testInstance, injectedInstance); }
class FieldInjector { private final Field field; private final List<InstanceHandle<?>> unsetHandles; void unset(Object testInstance) throws Exception { for (InstanceHandle<?> handle : unsetHandles) { if (handle.getBean() != null && handle.getBean().getScope().equals(Dependent.class)) { try { handle.destroy(); } catch (Exception e) { LOG.errorf(e, "Unable to destroy the injected %s", handle.getBean()); } } } field.setAccessible(true); field.set(testInstance, null); } }
class FieldInjector { private final Field field; private final List<InstanceHandle<?>> unsetHandles; void unset(Object testInstance) throws Exception { for (InstanceHandle<?> handle : unsetHandles) { if (handle.getBean() != null && handle.getBean().getScope().equals(Dependent.class)) { try { handle.destroy(); } catch (Exception e) { LOG.errorf(e, "Unable to destroy the injected %s", handle.getBean()); } } } field.setAccessible(true); field.set(testInstance, null); } }
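For context, the injection shape this code supports is Quarkus's `@All` list injection; a hedged sketch (the bean type is hypothetical, and the `Inject` import package depends on the Quarkus version):

import io.quarkus.arc.All;
import jakarta.inject.Inject; // javax.inject.Inject on older Quarkus versions
import java.util.List;

class AllInjectionSketch {
    interface MyService { } // hypothetical bean type

    // Injects every bean assignable to MyService. With an extra qualifier
    // alongside @All, the pre-fix check (qualifiers.length == 1) would fall
    // through; the anyMatch version in the fixed body handles that case.
    @Inject
    @All
    List<MyService> services;
}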
Why check `isMaterializedView` in this function instead of checking it outside and calling `getPartitionNamesToRefreshForMv` directly?
public Set<String> getUpdatedPartitionNamesOfTable(Table base, boolean withMv) { if (!base.isLocalTable()) { return Sets.newHashSet(); } OlapTable baseTable = (OlapTable) base; Map<String, BasePartitionInfo> baseTableVisibleVersionMap = getRefreshScheme() .getAsyncRefreshContext() .getBaseTableVisibleVersionMap() .computeIfAbsent(baseTable.getId(), k -> Maps.newHashMap()); Set<String> result = Sets.newHashSet(); for (String partitionName : baseTable.getPartitionNames()) { if (!baseTableVisibleVersionMap.containsKey(partitionName) && baseTable.getPartition(partitionName).getVisibleVersion() != 1) { result.add(partitionName); } } for (Map.Entry<String, BasePartitionInfo> versionEntry : baseTableVisibleVersionMap.entrySet()) { String basePartitionName = versionEntry.getKey(); Partition basePartition = baseTable.getPartition(basePartitionName); if (basePartition == null) { result.addAll(baseTable.getPartitionNames()); return result; } BasePartitionInfo basePartitionInfo = versionEntry.getValue(); if (basePartitionInfo == null || basePartitionInfo.getId() != basePartition.getId() || basePartition.getVisibleVersion() > basePartitionInfo.getVersion()) { result.add(basePartitionName); } } if (withMv && baseTable.isMaterializedView()) { Set<String> partitionNames = ((MaterializedView) baseTable).getPartitionNamesToRefreshForMv(); result.addAll(partitionNames); } return result; }
Set<String> partitionNames = ((MaterializedView) baseTable).getPartitionNamesToRefreshForMv();
public Set<String> getUpdatedPartitionNamesOfTable(Table base, boolean withMv) { if (!base.isLocalTable()) { return Sets.newHashSet(); } OlapTable baseTable = (OlapTable) base; Map<String, BasePartitionInfo> baseTableVisibleVersionMap = getRefreshScheme() .getAsyncRefreshContext() .getBaseTableVisibleVersionMap() .computeIfAbsent(baseTable.getId(), k -> Maps.newHashMap()); Set<String> result = Sets.newHashSet(); for (String partitionName : baseTable.getPartitionNames()) { if (!baseTableVisibleVersionMap.containsKey(partitionName) && baseTable.getPartition(partitionName).getVisibleVersion() != 1) { result.add(partitionName); } } for (Map.Entry<String, BasePartitionInfo> versionEntry : baseTableVisibleVersionMap.entrySet()) { String basePartitionName = versionEntry.getKey(); Partition basePartition = baseTable.getPartition(basePartitionName); if (basePartition == null) { result.addAll(baseTable.getPartitionNames()); return result; } BasePartitionInfo basePartitionInfo = versionEntry.getValue(); if (basePartitionInfo == null || basePartitionInfo.getId() != basePartition.getId() || basePartition.getVisibleVersion() > basePartitionInfo.getVersion()) { result.add(basePartitionName); } } if (withMv && baseTable.isMaterializedView()) { Set<String> partitionNames = ((MaterializedView) baseTable).getPartitionNamesToRefreshForMv(); result.addAll(partitionNames); } return result; }
class MvRefreshScheme { @SerializedName(value = "type") private RefreshType type; @SerializedName(value = "asyncRefreshContext") private AsyncRefreshContext asyncRefreshContext; @SerializedName(value = "lastRefreshTime") private long lastRefreshTime; public MvRefreshScheme() { this.type = RefreshType.ASYNC; this.asyncRefreshContext = new AsyncRefreshContext(); this.lastRefreshTime = 0; } public boolean isIncremental() { return this.type.equals(RefreshType.INCREMENTAL); } public RefreshType getType() { return type; } public void setType(RefreshType type) { this.type = type; } public AsyncRefreshContext getAsyncRefreshContext() { return asyncRefreshContext; } public void setAsyncRefreshContext(AsyncRefreshContext asyncRefreshContext) { this.asyncRefreshContext = asyncRefreshContext; } public long getLastRefreshTime() { return lastRefreshTime; } public void setLastRefreshTime(long lastRefreshTime) { this.lastRefreshTime = lastRefreshTime; } }
class MvRefreshScheme { @SerializedName(value = "type") private RefreshType type; @SerializedName(value = "asyncRefreshContext") private AsyncRefreshContext asyncRefreshContext; @SerializedName(value = "lastRefreshTime") private long lastRefreshTime; public MvRefreshScheme() { this.type = RefreshType.ASYNC; this.asyncRefreshContext = new AsyncRefreshContext(); this.lastRefreshTime = 0; } public boolean isIncremental() { return this.type.equals(RefreshType.INCREMENTAL); } public RefreshType getType() { return type; } public void setType(RefreshType type) { this.type = type; } public AsyncRefreshContext getAsyncRefreshContext() { return asyncRefreshContext; } public void setAsyncRefreshContext(AsyncRefreshContext asyncRefreshContext) { this.asyncRefreshContext = asyncRefreshContext; } public long getLastRefreshTime() { return lastRefreshTime; } public void setLastRefreshTime(long lastRefreshTime) { this.lastRefreshTime = lastRefreshTime; } }
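The reviewer's alternative would keep `getUpdatedPartitionNamesOfTable` MV-agnostic and move the check to the call site; a hedged sketch of that shape (the `mv` receiver and surrounding caller are hypothetical):

// Caller decides whether MV staleness matters instead of passing withMv=true.
Set<String> updated = mv.getUpdatedPartitionNamesOfTable(baseTable, false);
if (baseTable.isMaterializedView()) {
    updated.addAll(((MaterializedView) baseTable).getPartitionNamesToRefreshForMv());
}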
```suggestion final Optional<AllocatedSlot> freedSlot = slotPool.freeReservedSlot(allocationId, currentTime); ```
public ResourceCounter freeReservedSlot(AllocationID allocationId, @Nullable Throwable cause, long currentTime) { LOG.debug("Release slot {}.", allocationId); final Optional<AllocatedSlot> releasedSlot = slotPool.freeReservedSlot(allocationId, currentTime); Optional<ResourceCounter> previouslyFulfilledRequirement = releasedSlot.map(Collections::singleton).map(this::getFulfilledRequirements); releasedSlot.ifPresent(allocatedSlot -> { releasePayload(Collections.singleton(allocatedSlot), cause); tryToFulfillResourceRequirement(allocatedSlot); notifyNewSlots.accept(Collections.singletonList(allocatedSlot)); }); return previouslyFulfilledRequirement.orElseGet(ResourceCounter::empty); }
final Optional<AllocatedSlot> releasedSlot = slotPool.freeReservedSlot(allocationId, currentTime);
public ResourceCounter freeReservedSlot(AllocationID allocationId, @Nullable Throwable cause, long currentTime) { LOG.debug("Free reserved slot {}.", allocationId); final Optional<AllocatedSlot> freedSlot = slotPool.freeReservedSlot(allocationId, currentTime); Optional<ResourceCounter> previouslyFulfilledRequirement = freedSlot.map(Collections::singleton).map(this::getFulfilledRequirements); freedSlot.ifPresent(allocatedSlot -> { releasePayload(Collections.singleton(allocatedSlot), cause); tryToFulfillResourceRequirement(allocatedSlot); notifyNewSlots.accept(Collections.singletonList(allocatedSlot)); }); return previouslyFulfilledRequirement.orElseGet(ResourceCounter::empty); }
class DefaultDeclarativeSlotPool implements DeclarativeSlotPool { private static final Logger LOG = LoggerFactory.getLogger(DefaultDeclarativeSlotPool.class); private final Consumer<? super Collection<ResourceRequirement>> notifyNewResourceRequirements; private final Consumer<? super Collection<? extends PhysicalSlot>> notifyNewSlots; private final Time idleSlotTimeout; private final Time rpcTimeout; private final AllocatedSlotPool slotPool; private final Map<AllocationID, ResourceProfile> slotToRequirementProfileMappings; private ResourceCounter totalResourceRequirements; private ResourceCounter fulfilledResourceRequirements; private final RequirementMatcher requirementMatcher = new DefaultRequirementMatcher(); public DefaultDeclarativeSlotPool( AllocatedSlotPool slotPool, Consumer<? super Collection<ResourceRequirement>> notifyNewResourceRequirements, Consumer<? super Collection<? extends PhysicalSlot>> notifyNewSlots, Time idleSlotTimeout, Time rpcTimeout) { this.slotPool = slotPool; this.notifyNewResourceRequirements = notifyNewResourceRequirements; this.notifyNewSlots = notifyNewSlots; this.idleSlotTimeout = idleSlotTimeout; this.rpcTimeout = rpcTimeout; this.totalResourceRequirements = ResourceCounter.empty(); this.fulfilledResourceRequirements = ResourceCounter.empty(); this.slotToRequirementProfileMappings = new HashMap<>(); } @Override public void increaseResourceRequirementsBy(ResourceCounter increment) { totalResourceRequirements = totalResourceRequirements.add(increment); declareResourceRequirements(); } @Override public void decreaseResourceRequirementsBy(ResourceCounter decrement) { totalResourceRequirements = totalResourceRequirements.subtract(decrement); declareResourceRequirements(); } private void declareResourceRequirements() { notifyNewResourceRequirements.accept(getResourceRequirements()); } @Override public Collection<ResourceRequirement> getResourceRequirements() { final Collection<ResourceRequirement> currentResourceRequirements = new ArrayList<>(); for (Map.Entry<ResourceProfile, Integer> resourceRequirement : totalResourceRequirements.getResourcesWithCount()) { currentResourceRequirements.add(ResourceRequirement.create(resourceRequirement.getKey(), resourceRequirement.getValue())); } return currentResourceRequirements; } @Override public Collection<SlotOffer> offerSlots( Collection<? 
extends SlotOffer> offers, TaskManagerLocation taskManagerLocation, TaskManagerGateway taskManagerGateway, long currentTime) { LOG.debug("Received {} slot offers from TaskExecutor {}.", offers.size(), taskManagerLocation); final Collection<SlotOffer> acceptedSlotOffers = new ArrayList<>(); final Collection<AllocatedSlot> acceptedSlots = new ArrayList<>(); for (SlotOffer offer : offers) { if (slotPool.containsSlot(offer.getAllocationId())) { acceptedSlotOffers.add(offer); } else { Optional<AllocatedSlot> acceptedSlot = matchOfferWithOutstandingRequirements(offer, taskManagerLocation, taskManagerGateway); if (acceptedSlot.isPresent()) { acceptedSlotOffers.add(offer); acceptedSlots.add(acceptedSlot.get()); } } } slotPool.addSlots(acceptedSlots, currentTime); if (!acceptedSlots.isEmpty()) { notifyNewSlots.accept(acceptedSlots); } return acceptedSlotOffers; } private Optional<AllocatedSlot> matchOfferWithOutstandingRequirements( SlotOffer slotOffer, TaskManagerLocation taskManagerLocation, TaskManagerGateway taskManagerGateway) { final Optional<ResourceProfile> match = requirementMatcher.match( slotOffer.getResourceProfile(), totalResourceRequirements.getResourcesWithCount(), fulfilledResourceRequirements::getResourceCount); if (match.isPresent()) { increaseAvailableResources(ResourceCounter.withResource(match.get(), 1)); final AllocatedSlot allocatedSlot = createAllocatedSlot( slotOffer, taskManagerLocation, taskManagerGateway); slotToRequirementProfileMappings.put(allocatedSlot.getAllocationId(), match.get()); return Optional.of(allocatedSlot); } return Optional.empty(); } @VisibleForTesting ResourceCounter calculateUnfulfilledResources() { return totalResourceRequirements.subtract(fulfilledResourceRequirements); } private AllocatedSlot createAllocatedSlot( SlotOffer slotOffer, TaskManagerLocation taskManagerLocation, TaskManagerGateway taskManagerGateway) { return new AllocatedSlot( slotOffer.getAllocationId(), taskManagerLocation, slotOffer.getSlotIndex(), slotOffer.getResourceProfile(), taskManagerGateway); } private void increaseAvailableResources(ResourceCounter acceptedResources) { fulfilledResourceRequirements = fulfilledResourceRequirements.add(acceptedResources); } @Nonnull private ResourceProfile getMatchingResourceProfile(AllocationID allocationId) { return Preconditions.checkNotNull(slotToRequirementProfileMappings.get(allocationId), "No matching resource profile found for %s", allocationId); } @Override public PhysicalSlot reserveFreeSlot(AllocationID allocationId, ResourceProfile requiredSlotProfile) { final AllocatedSlot allocatedSlot = slotPool.reserveFreeSlot(allocationId); Preconditions.checkState(allocatedSlot.getResourceProfile().isMatching(requiredSlotProfile), ""); ResourceProfile previouslyMatchedResourceProfile = Preconditions.checkNotNull(slotToRequirementProfileMappings.get(allocationId)); if (!previouslyMatchedResourceProfile.equals(requiredSlotProfile)) { updateSlotToRequirementProfileMapping(allocationId, requiredSlotProfile); adjustRequirements(previouslyMatchedResourceProfile, requiredSlotProfile); } return allocatedSlot; } @Override private void tryToFulfillResourceRequirement(AllocatedSlot allocatedSlot) { matchOfferWithOutstandingRequirements(allocatedSlotToSlotOffer(allocatedSlot), allocatedSlot.getTaskManagerLocation(), allocatedSlot.getTaskManagerGateway()); } private void updateSlotToRequirementProfileMapping(AllocationID allocationId, ResourceProfile matchedResourceProfile) { final ResourceProfile oldResourceProfile = 
Preconditions.checkNotNull(slotToRequirementProfileMappings.put(allocationId, matchedResourceProfile), "Expected slot profile matching to be non-empty."); fulfilledResourceRequirements = fulfilledResourceRequirements.add(matchedResourceProfile, 1); fulfilledResourceRequirements = fulfilledResourceRequirements.subtract(oldResourceProfile, 1); } private void adjustRequirements(ResourceProfile oldResourceProfile, ResourceProfile newResourceProfile) { decreaseResourceRequirementsBy(ResourceCounter.withResource(newResourceProfile, 1)); increaseResourceRequirementsBy(ResourceCounter.withResource(oldResourceProfile, 1)); } @Nonnull private SlotOffer allocatedSlotToSlotOffer(AllocatedSlot allocatedSlot) { return new SlotOffer(allocatedSlot.getAllocationId(), allocatedSlot.getPhysicalSlotNumber(), allocatedSlot.getResourceProfile()); } @Override public ResourceCounter releaseSlots(ResourceID owner, Exception cause) { final Collection<AllocatedSlot> removedSlots = slotPool.removeSlots(owner); ResourceCounter previouslyFulfilledRequirements = getFulfilledRequirements(removedSlots); releasePayload(removedSlots, cause); releaseSlots(removedSlots, cause); return previouslyFulfilledRequirements; } @Override public ResourceCounter releaseSlot(AllocationID allocationId, Exception cause) { final Optional<AllocatedSlot> removedSlot = slotPool.removeSlot(allocationId); Optional<ResourceCounter> previouslyFulfilledRequirement = removedSlot.map(Collections::singleton).map(this::getFulfilledRequirements); removedSlot.ifPresent(allocatedSlot -> { releasePayload(Collections.singleton(allocatedSlot), cause); releaseSlots(Collections.singleton(allocatedSlot), cause); }); return previouslyFulfilledRequirement.orElseGet(ResourceCounter::empty); } private void releasePayload(Iterable<? 
extends AllocatedSlot> allocatedSlots, Throwable cause) { for (AllocatedSlot allocatedSlot : allocatedSlots) { allocatedSlot.releasePayload(cause); } } @Override public void releaseIdleSlots(long currentTimeMillis) { final Collection<AllocatedSlotPool.FreeSlotInfo> freeSlotsInformation = slotPool.getFreeSlotsInformation(); ResourceCounter excessResources = fulfilledResourceRequirements.subtract(totalResourceRequirements); final Iterator<AllocatedSlotPool.FreeSlotInfo> freeSlotIterator = freeSlotsInformation.iterator(); final Collection<AllocatedSlot> slotsToReturnToOwner = new ArrayList<>(); while (!excessResources.isEmpty() && freeSlotIterator.hasNext()) { final AllocatedSlotPool.FreeSlotInfo idleSlot = freeSlotIterator.next(); if (currentTimeMillis >= idleSlot.getFreeSince() + idleSlotTimeout.toMilliseconds()) { final ResourceProfile matchingProfile = getMatchingResourceProfile(idleSlot.getAllocationId()); if (excessResources.containsResource(matchingProfile)) { excessResources = excessResources.subtract(matchingProfile, 1); final Optional<AllocatedSlot> removedSlot = slotPool.removeSlot(idleSlot.getAllocationId()); final AllocatedSlot allocatedSlot = removedSlot.orElseThrow(() -> new IllegalStateException(String.format("Could not find slot for allocation id %s.", idleSlot.getAllocationId()))); slotsToReturnToOwner.add(allocatedSlot); } } } releaseSlots(slotsToReturnToOwner, new FlinkException("Returning idle slots to their owners.")); } private void releaseSlots(Iterable<AllocatedSlot> slotsToReturnToOwner, Throwable cause) { for (AllocatedSlot slotToReturn : slotsToReturnToOwner) { Preconditions.checkState(!slotToReturn.isUsed(), "Free slot must not be used."); LOG.info("Releasing slot [{}].", slotToReturn.getAllocationId()); final ResourceProfile matchingResourceProfile = getMatchingResourceProfile(slotToReturn.getAllocationId()); fulfilledResourceRequirements = fulfilledResourceRequirements.subtract(matchingResourceProfile, 1); slotToRequirementProfileMappings.remove(slotToReturn.getAllocationId()); final CompletableFuture<Acknowledge> freeSlotFuture = slotToReturn.getTaskManagerGateway().freeSlot( slotToReturn.getAllocationId(), cause, rpcTimeout); freeSlotFuture.whenComplete((Acknowledge ignored, Throwable throwable) -> { if (throwable != null) { LOG.debug("Releasing slot [{}] of registered TaskExecutor {} failed. Discarding slot.", slotToReturn.getAllocationId(), slotToReturn.getTaskManagerId(), throwable); } }); } } @Override public Collection<SlotInfoWithUtilization> getFreeSlotsInformation() { return slotPool.getFreeSlotsInformation().stream() .map(AllocatedSlotPool.FreeSlotInfo::asSlotInfo) .collect(Collectors.toList()); } @Override public Collection<? extends SlotInfo> getAllSlotsInformation() { return slotPool.getAllSlotsInformation(); } @Override public boolean containsSlots(ResourceID owner) { return slotPool.containsSlots(owner); } private ResourceCounter getFulfilledRequirements(Iterable<? 
extends AllocatedSlot> allocatedSlots) { ResourceCounter resourceDecrement = ResourceCounter.empty(); for (AllocatedSlot allocatedSlot : allocatedSlots) { final ResourceProfile matchingResourceProfile = getMatchingResourceProfile(allocatedSlot.getAllocationId()); resourceDecrement = resourceDecrement.add(matchingResourceProfile, 1); } return resourceDecrement; } @VisibleForTesting ResourceCounter getFulfilledResourceRequirements() { return fulfilledResourceRequirements; } private static final class SlotOfferMatching { private final SlotOffer slotOffer; @Nullable private final ResourceProfile matching; private SlotOfferMatching(SlotOffer slotOffer, @Nullable ResourceProfile matching) { this.slotOffer = slotOffer; this.matching = matching; } private SlotOffer getSlotOffer() { return slotOffer; } private Optional<ResourceProfile> getMatching() { return Optional.ofNullable(matching); } private static SlotOfferMatching createMatching(SlotOffer slotOffer, ResourceProfile matching) { return new SlotOfferMatching(slotOffer, matching); } private static SlotOfferMatching createMismatch(SlotOffer slotOffer) { return new SlotOfferMatching(slotOffer, null); } } }
class DefaultDeclarativeSlotPool implements DeclarativeSlotPool { private static final Logger LOG = LoggerFactory.getLogger(DefaultDeclarativeSlotPool.class); private final Consumer<? super Collection<ResourceRequirement>> notifyNewResourceRequirements; private final Consumer<? super Collection<? extends PhysicalSlot>> notifyNewSlots; private final Time idleSlotTimeout; private final Time rpcTimeout; private final AllocatedSlotPool slotPool; private final Map<AllocationID, ResourceProfile> slotToRequirementProfileMappings; private ResourceCounter totalResourceRequirements; private ResourceCounter fulfilledResourceRequirements; private final RequirementMatcher requirementMatcher = new DefaultRequirementMatcher(); public DefaultDeclarativeSlotPool( AllocatedSlotPool slotPool, Consumer<? super Collection<ResourceRequirement>> notifyNewResourceRequirements, Consumer<? super Collection<? extends PhysicalSlot>> notifyNewSlots, Time idleSlotTimeout, Time rpcTimeout) { this.slotPool = slotPool; this.notifyNewResourceRequirements = notifyNewResourceRequirements; this.notifyNewSlots = notifyNewSlots; this.idleSlotTimeout = idleSlotTimeout; this.rpcTimeout = rpcTimeout; this.totalResourceRequirements = ResourceCounter.empty(); this.fulfilledResourceRequirements = ResourceCounter.empty(); this.slotToRequirementProfileMappings = new HashMap<>(); } @Override public void increaseResourceRequirementsBy(ResourceCounter increment) { totalResourceRequirements = totalResourceRequirements.add(increment); declareResourceRequirements(); } @Override public void decreaseResourceRequirementsBy(ResourceCounter decrement) { totalResourceRequirements = totalResourceRequirements.subtract(decrement); declareResourceRequirements(); } private void declareResourceRequirements() { notifyNewResourceRequirements.accept(getResourceRequirements()); } @Override public Collection<ResourceRequirement> getResourceRequirements() { final Collection<ResourceRequirement> currentResourceRequirements = new ArrayList<>(); for (Map.Entry<ResourceProfile, Integer> resourceRequirement : totalResourceRequirements.getResourcesWithCount()) { currentResourceRequirements.add(ResourceRequirement.create(resourceRequirement.getKey(), resourceRequirement.getValue())); } return currentResourceRequirements; } @Override public Collection<SlotOffer> offerSlots( Collection<? 
extends SlotOffer> offers, TaskManagerLocation taskManagerLocation, TaskManagerGateway taskManagerGateway, long currentTime) { LOG.debug("Received {} slot offers from TaskExecutor {}.", offers.size(), taskManagerLocation); final Collection<SlotOffer> acceptedSlotOffers = new ArrayList<>(); final Collection<AllocatedSlot> acceptedSlots = new ArrayList<>(); for (SlotOffer offer : offers) { if (slotPool.containsSlot(offer.getAllocationId())) { acceptedSlotOffers.add(offer); } else { Optional<AllocatedSlot> acceptedSlot = matchOfferWithOutstandingRequirements(offer, taskManagerLocation, taskManagerGateway); if (acceptedSlot.isPresent()) { acceptedSlotOffers.add(offer); acceptedSlots.add(acceptedSlot.get()); } } } slotPool.addSlots(acceptedSlots, currentTime); if (!acceptedSlots.isEmpty()) { notifyNewSlots.accept(acceptedSlots); } return acceptedSlotOffers; } private Optional<AllocatedSlot> matchOfferWithOutstandingRequirements( SlotOffer slotOffer, TaskManagerLocation taskManagerLocation, TaskManagerGateway taskManagerGateway) { final Optional<ResourceProfile> match = requirementMatcher.match( slotOffer.getResourceProfile(), totalResourceRequirements.getResourcesWithCount(), fulfilledResourceRequirements::getResourceCount); if (match.isPresent()) { final ResourceProfile matchedRequirement = match.get(); LOG.debug("Matched slot offer {} to requirement {}.", slotOffer.getAllocationId(), matchedRequirement); increaseAvailableResources(ResourceCounter.withResource(matchedRequirement, 1)); final AllocatedSlot allocatedSlot = createAllocatedSlot( slotOffer, taskManagerLocation, taskManagerGateway); slotToRequirementProfileMappings.put(allocatedSlot.getAllocationId(), matchedRequirement); return Optional.of(allocatedSlot); } return Optional.empty(); } @VisibleForTesting ResourceCounter calculateUnfulfilledResources() { return totalResourceRequirements.subtract(fulfilledResourceRequirements); } private AllocatedSlot createAllocatedSlot( SlotOffer slotOffer, TaskManagerLocation taskManagerLocation, TaskManagerGateway taskManagerGateway) { return new AllocatedSlot( slotOffer.getAllocationId(), taskManagerLocation, slotOffer.getSlotIndex(), slotOffer.getResourceProfile(), taskManagerGateway); } private void increaseAvailableResources(ResourceCounter acceptedResources) { fulfilledResourceRequirements = fulfilledResourceRequirements.add(acceptedResources); } @Nonnull private ResourceProfile getMatchingResourceProfile(AllocationID allocationId) { return Preconditions.checkNotNull(slotToRequirementProfileMappings.get(allocationId), "No matching resource profile found for %s", allocationId); } @Override public PhysicalSlot reserveFreeSlot(AllocationID allocationId, ResourceProfile requiredSlotProfile) { final AllocatedSlot allocatedSlot = slotPool.reserveFreeSlot(allocationId); Preconditions.checkState( allocatedSlot.getResourceProfile().isMatching(requiredSlotProfile), "Slot {} cannot fulfill the given requirement. SlotProfile={} Requirement={}", allocationId, allocatedSlot.getResourceProfile(), requiredSlotProfile); ResourceProfile previouslyMatchedResourceProfile = Preconditions.checkNotNull(slotToRequirementProfileMappings.get(allocationId)); if (!previouslyMatchedResourceProfile.equals(requiredSlotProfile)) { LOG.debug( "Adjusting requirements because a slot was reserved for a different requirement than initially assumed. 
Slot={} assumedRequirement={} actualRequirement={}", allocationId, previouslyMatchedResourceProfile, requiredSlotProfile); updateSlotToRequirementProfileMapping(allocationId, requiredSlotProfile); adjustRequirements(previouslyMatchedResourceProfile, requiredSlotProfile); } return allocatedSlot; } @Override private void tryToFulfillResourceRequirement(AllocatedSlot allocatedSlot) { matchOfferWithOutstandingRequirements(allocatedSlotToSlotOffer(allocatedSlot), allocatedSlot.getTaskManagerLocation(), allocatedSlot.getTaskManagerGateway()); } private void updateSlotToRequirementProfileMapping(AllocationID allocationId, ResourceProfile matchedResourceProfile) { final ResourceProfile oldResourceProfile = Preconditions.checkNotNull(slotToRequirementProfileMappings.put(allocationId, matchedResourceProfile), "Expected slot profile matching to be non-empty."); fulfilledResourceRequirements = fulfilledResourceRequirements.add(matchedResourceProfile, 1); fulfilledResourceRequirements = fulfilledResourceRequirements.subtract(oldResourceProfile, 1); } private void adjustRequirements(ResourceProfile oldResourceProfile, ResourceProfile newResourceProfile) { decreaseResourceRequirementsBy(ResourceCounter.withResource(newResourceProfile, 1)); increaseResourceRequirementsBy(ResourceCounter.withResource(oldResourceProfile, 1)); } @Nonnull private SlotOffer allocatedSlotToSlotOffer(AllocatedSlot allocatedSlot) { return new SlotOffer(allocatedSlot.getAllocationId(), allocatedSlot.getPhysicalSlotNumber(), allocatedSlot.getResourceProfile()); } @Override public ResourceCounter releaseSlots(ResourceID owner, Exception cause) { final Collection<AllocatedSlot> removedSlots = slotPool.removeSlots(owner); ResourceCounter previouslyFulfilledRequirements = getFulfilledRequirements(removedSlots); releasePayload(removedSlots, cause); releaseSlots(removedSlots, cause); return previouslyFulfilledRequirements; } @Override public ResourceCounter releaseSlot(AllocationID allocationId, Exception cause) { final Optional<AllocatedSlot> removedSlot = slotPool.removeSlot(allocationId); Optional<ResourceCounter> previouslyFulfilledRequirement = removedSlot.map(Collections::singleton).map(this::getFulfilledRequirements); removedSlot.ifPresent(allocatedSlot -> { releasePayload(Collections.singleton(allocatedSlot), cause); releaseSlots(Collections.singleton(allocatedSlot), cause); }); return previouslyFulfilledRequirement.orElseGet(ResourceCounter::empty); } private void releasePayload(Iterable<? 
extends AllocatedSlot> allocatedSlots, Throwable cause) { for (AllocatedSlot allocatedSlot : allocatedSlots) { allocatedSlot.releasePayload(cause); } } @Override public void releaseIdleSlots(long currentTimeMillis) { final Collection<AllocatedSlotPool.FreeSlotInfo> freeSlotsInformation = slotPool.getFreeSlotsInformation(); ResourceCounter excessResources = fulfilledResourceRequirements.subtract(totalResourceRequirements); final Iterator<AllocatedSlotPool.FreeSlotInfo> freeSlotIterator = freeSlotsInformation.iterator(); final Collection<AllocatedSlot> slotsToReturnToOwner = new ArrayList<>(); while (!excessResources.isEmpty() && freeSlotIterator.hasNext()) { final AllocatedSlotPool.FreeSlotInfo idleSlot = freeSlotIterator.next(); if (currentTimeMillis >= idleSlot.getFreeSince() + idleSlotTimeout.toMilliseconds()) { final ResourceProfile matchingProfile = getMatchingResourceProfile(idleSlot.getAllocationId()); if (excessResources.containsResource(matchingProfile)) { excessResources = excessResources.subtract(matchingProfile, 1); final Optional<AllocatedSlot> removedSlot = slotPool.removeSlot(idleSlot.getAllocationId()); final AllocatedSlot allocatedSlot = removedSlot.orElseThrow(() -> new IllegalStateException(String.format("Could not find slot for allocation id %s.", idleSlot.getAllocationId()))); slotsToReturnToOwner.add(allocatedSlot); } } } releaseSlots(slotsToReturnToOwner, new FlinkException("Returning idle slots to their owners.")); } private void releaseSlots(Iterable<AllocatedSlot> slotsToReturnToOwner, Throwable cause) { for (AllocatedSlot slotToReturn : slotsToReturnToOwner) { Preconditions.checkState(!slotToReturn.isUsed(), "Free slot must not be used."); if (LOG.isDebugEnabled()) { LOG.info("Releasing slot [{}].", slotToReturn.getAllocationId(), cause); } else { LOG.info("Releasing slot [{}].", slotToReturn.getAllocationId()); } final ResourceProfile matchingResourceProfile = getMatchingResourceProfile(slotToReturn.getAllocationId()); fulfilledResourceRequirements = fulfilledResourceRequirements.subtract(matchingResourceProfile, 1); slotToRequirementProfileMappings.remove(slotToReturn.getAllocationId()); final CompletableFuture<Acknowledge> freeSlotFuture = slotToReturn.getTaskManagerGateway().freeSlot( slotToReturn.getAllocationId(), cause, rpcTimeout); freeSlotFuture.whenComplete((Acknowledge ignored, Throwable throwable) -> { if (throwable != null) { LOG.debug("Releasing slot [{}] of registered TaskExecutor {} failed. Discarding slot.", slotToReturn.getAllocationId(), slotToReturn.getTaskManagerId(), throwable); } }); } } @Override public Collection<SlotInfoWithUtilization> getFreeSlotsInformation() { return slotPool.getFreeSlotsInformation().stream() .map(AllocatedSlotPool.FreeSlotInfo::asSlotInfo) .collect(Collectors.toList()); } @Override public Collection<? extends SlotInfo> getAllSlotsInformation() { return slotPool.getAllSlotsInformation(); } @Override public boolean containsSlots(ResourceID owner) { return slotPool.containsSlots(owner); } private ResourceCounter getFulfilledRequirements(Iterable<? 
extends AllocatedSlot> allocatedSlots) { ResourceCounter resourceDecrement = ResourceCounter.empty(); for (AllocatedSlot allocatedSlot : allocatedSlots) { final ResourceProfile matchingResourceProfile = getMatchingResourceProfile(allocatedSlot.getAllocationId()); resourceDecrement = resourceDecrement.add(matchingResourceProfile, 1); } return resourceDecrement; } @VisibleForTesting ResourceCounter getFulfilledResourceRequirements() { return fulfilledResourceRequirements; } private static final class SlotOfferMatching { private final SlotOffer slotOffer; @Nullable private final ResourceProfile matching; private SlotOfferMatching(SlotOffer slotOffer, @Nullable ResourceProfile matching) { this.slotOffer = slotOffer; this.matching = matching; } private SlotOffer getSlotOffer() { return slotOffer; } private Optional<ResourceProfile> getMatching() { return Optional.ofNullable(matching); } private static SlotOfferMatching createMatching(SlotOffer slotOffer, ResourceProfile matching) { return new SlotOfferMatching(slotOffer, matching); } private static SlotOfferMatching createMismatch(SlotOffer slotOffer) { return new SlotOfferMatching(slotOffer, null); } } }
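This second version of the class differs from the previous listing only in releaseSlots: the message is still logged at INFO level, but the release cause (and with it a stack trace) is attached only when debug logging is enabled. The pattern in isolation, as a sketch (the class and method names here are invented, not Flink API):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Minimal sketch of the guarded logging change reviewed above; only the
// logging pattern mirrors the actual code.
class GuardedReleaseLoggingSketch {

    private static final Logger LOG = LoggerFactory.getLogger(GuardedReleaseLoggingSketch.class);

    void logSlotRelease(String allocationId, Throwable cause) {
        if (LOG.isDebugEnabled()) {
            // A trailing Throwable argument makes SLF4J append the stack trace.
            LOG.info("Releasing slot [{}].", allocationId, cause);
        } else {
            LOG.info("Releasing slot [{}].", allocationId);
        }
    }
}

SLF4J treats a trailing Throwable argument specially and prints its stack trace, so the guard keeps routine INFO output compact while preserving full detail when debug logging is on.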
We can revisit this separately.
public void visit(BLangTypeDefinition astTypeDefinition) { BType type = getDefinedType(astTypeDefinition); Name displayName = astTypeDefinition.symbol.name; if (type.tag == TypeTags.RECORD) { BRecordType recordType = (BRecordType) type; if (recordType.shouldPrintShape()) { displayName = new Name(recordType.toString()); } } BIRTypeDefinition typeDef = new BIRTypeDefinition(astTypeDefinition.pos, astTypeDefinition.symbol.name, astTypeDefinition.symbol.flags, astTypeDefinition.isBuiltinTypeDef, type, new ArrayList<>(), astTypeDefinition.symbol.origin.toBIROrigin(), displayName, astTypeDefinition.symbol.originalName); if (astTypeDefinition.symbol.tag == SymTag.TYPE_DEF) { BTypeReferenceType referenceType = ((BTypeDefinitionSymbol) astTypeDefinition.symbol).referenceType; typeDef.referenceType = referenceType; BTypeSymbol typeSymbol = astTypeDefinition.symbol.type.tsymbol; if (type.tsymbol.owner == astTypeDefinition.symbol.owner && !(Symbols.isFlagOn(typeSymbol.flags, Flags.CLASS))) { typeDefs.put(typeSymbol, typeDef); } else { if (referenceType != null) { typeDef.type = referenceType; } } } else { typeDefs.put(astTypeDefinition.symbol, typeDef); } this.env.enclPkg.typeDefs.add(typeDef); typeDef.index = this.env.enclPkg.typeDefs.size() - 1; typeDef.setMarkdownDocAttachment(astTypeDefinition.symbol.markdownDocumentation); populateBIRAnnotAttachments(astTypeDefinition.annAttachments, typeDef.annotAttachments, this.env); if (astTypeDefinition.typeNode.getKind() == NodeKind.RECORD_TYPE || astTypeDefinition.typeNode.getKind() == NodeKind.OBJECT_TYPE) { BLangStructureTypeNode typeNode = (BLangStructureTypeNode) astTypeDefinition.typeNode; for (BLangType typeRef : typeNode.typeRefs) { typeDef.referencedTypes.add(typeRef.getBType()); } } BSymbol typeSymbol = astTypeDefinition.symbol.tag == SymTag.TYPE_DEF ? astTypeDefinition.symbol.type.tsymbol : astTypeDefinition.symbol; if (typeSymbol.tag != SymTag.OBJECT || !Symbols.isFlagOn(typeSymbol.flags, Flags.CLASS)) { return; } for (BAttachedFunction func : ((BObjectTypeSymbol) typeSymbol).referencedFunctions) { if (!Symbols.isFlagOn(func.symbol.flags, Flags.INTERFACE)) { return; } BInvokableSymbol funcSymbol = func.symbol; BIRFunction birFunc = new BIRFunction(astTypeDefinition.pos, func.funcName, funcSymbol.flags, func.type, names.fromString(DEFAULT_WORKER_NAME), 0, funcSymbol.origin.toBIROrigin()); if (funcSymbol.receiverSymbol != null) { birFunc.receiver = getSelf(funcSymbol.receiverSymbol ); } birFunc.setMarkdownDocAttachment(funcSymbol.markdownDocumentation); int defaultableParamsCount = 0; birFunc.argsCount = funcSymbol.params.size() + defaultableParamsCount + (funcSymbol.restParam != null ? 1 : 0); funcSymbol.params.forEach(requiredParam -> addParam(birFunc, requiredParam, astTypeDefinition.pos)); if (funcSymbol.restParam != null) { addRestParam(birFunc, funcSymbol.restParam, astTypeDefinition.pos); } birFunc.returnVariable = new BIRVariableDcl(astTypeDefinition.pos, funcSymbol.retType, this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.RETURN, null); birFunc.localVars.add(0, birFunc.returnVariable); typeDef.attachedFuncs.add(birFunc); } }
&& !(Symbols.isFlagOn(typeSymbol.flags, Flags.CLASS))) {
public void visit(BLangTypeDefinition astTypeDefinition) { BType type = getDefinedType(astTypeDefinition); Name displayName = astTypeDefinition.symbol.name; if (type.tag == TypeTags.RECORD) { BRecordType recordType = (BRecordType) type; if (recordType.shouldPrintShape()) { displayName = new Name(recordType.toString()); } } BIRTypeDefinition typeDef = new BIRTypeDefinition(astTypeDefinition.pos, astTypeDefinition.symbol.name, astTypeDefinition.symbol.flags, astTypeDefinition.isBuiltinTypeDef, type, new ArrayList<>(), astTypeDefinition.symbol.origin.toBIROrigin(), displayName, astTypeDefinition.symbol.originalName); if (astTypeDefinition.symbol.tag == SymTag.TYPE_DEF) { BTypeReferenceType referenceType = ((BTypeDefinitionSymbol) astTypeDefinition.symbol).referenceType; typeDef.referenceType = referenceType; BTypeSymbol typeSymbol = astTypeDefinition.symbol.type.tsymbol; if (type.tsymbol.owner == astTypeDefinition.symbol.owner && !(Symbols.isFlagOn(typeSymbol.flags, Flags.CLASS))) { typeDefs.put(typeSymbol, typeDef); } else { if (referenceType != null) { typeDef.type = referenceType; } } } else { typeDefs.put(astTypeDefinition.symbol, typeDef); } this.env.enclPkg.typeDefs.add(typeDef); typeDef.index = this.env.enclPkg.typeDefs.size() - 1; typeDef.setMarkdownDocAttachment(astTypeDefinition.symbol.markdownDocumentation); populateBIRAnnotAttachments(astTypeDefinition.annAttachments, typeDef.annotAttachments, this.env); if (astTypeDefinition.typeNode.getKind() == NodeKind.RECORD_TYPE || astTypeDefinition.typeNode.getKind() == NodeKind.OBJECT_TYPE) { BLangStructureTypeNode typeNode = (BLangStructureTypeNode) astTypeDefinition.typeNode; for (BLangType typeRef : typeNode.typeRefs) { typeDef.referencedTypes.add(typeRef.getBType()); } } BSymbol typeSymbol = astTypeDefinition.symbol.tag == SymTag.TYPE_DEF ? astTypeDefinition.symbol.type.tsymbol : astTypeDefinition.symbol; if (typeSymbol.tag != SymTag.OBJECT || !Symbols.isFlagOn(typeSymbol.flags, Flags.CLASS)) { return; } for (BAttachedFunction func : ((BObjectTypeSymbol) typeSymbol).referencedFunctions) { if (!Symbols.isFlagOn(func.symbol.flags, Flags.INTERFACE)) { return; } BInvokableSymbol funcSymbol = func.symbol; BIRFunction birFunc = new BIRFunction(astTypeDefinition.pos, func.funcName, funcSymbol.flags, func.type, names.fromString(DEFAULT_WORKER_NAME), 0, funcSymbol.origin.toBIROrigin()); if (funcSymbol.receiverSymbol != null) { birFunc.receiver = getSelf(funcSymbol.receiverSymbol ); } birFunc.setMarkdownDocAttachment(funcSymbol.markdownDocumentation); int defaultableParamsCount = 0; birFunc.argsCount = funcSymbol.params.size() + defaultableParamsCount + (funcSymbol.restParam != null ? 1 : 0); funcSymbol.params.forEach(requiredParam -> addParam(birFunc, requiredParam, astTypeDefinition.pos)); if (funcSymbol.restParam != null) { addRestParam(birFunc, funcSymbol.restParam, astTypeDefinition.pos); } birFunc.returnVariable = new BIRVariableDcl(astTypeDefinition.pos, funcSymbol.retType, this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.RETURN, null); birFunc.localVars.add(0, birFunc.returnVariable); typeDef.attachedFuncs.add(birFunc); } }
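The target line above hinges on Symbols.isFlagOn(typeSymbol.flags, Flags.CLASS), an ordinary bitmask membership test on the symbol's flag word: the mapping into typeDefs is skipped for class symbols. A self-contained sketch of that idiom follows; the flag constants are invented for illustration and do not match Ballerina's actual Flags values.

// Hypothetical flag constants; only the bitmask idiom is the point here.
final class FlagCheckSketch {

    static final long CLASS = 1L << 0;
    static final long READONLY = 1L << 1;

    // Mirrors the Symbols.isFlagOn(flags, flag) check: true iff every bit
    // of `flag` is set in `flags`.
    static boolean isFlagOn(long flags, long flag) {
        return (flags & flag) == flag;
    }

    public static void main(String[] args) {
        long symbolFlags = CLASS | READONLY;
        System.out.println(isFlagOn(symbolFlags, CLASS));  // true
        System.out.println(isFlagOn(READONLY, CLASS));     // false: CLASS bit not set
    }
}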
class BIRGen extends BLangNodeVisitor { private static final CompilerContext.Key<BIRGen> BIR_GEN = new CompilerContext.Key<>(); public static final String DEFAULT_WORKER_NAME = "function"; public static final String CLONE_READ_ONLY = "cloneReadOnly"; private BIRGenEnv env; private Names names; private final SymbolTable symTable; private BIROptimizer birOptimizer; private final Types types; private boolean varAssignment = false; private Map<BSymbol, BIRTypeDefinition> typeDefs = new LinkedHashMap<>(); private BlockNode currentBlock; private Map<BlockNode, List<BIRVariableDcl>> varDclsByBlock = new HashMap<>(); public Map<BSymbol, BIRGlobalVariableDcl> globalVarMap = new HashMap<>(); private Map<BSymbol, BIRGlobalVariableDcl> dummyGlobalVarMapForLocks = new HashMap<>(); private Map<BLangLockStmt, BIRTerminator.Lock> lockStmtMap = new HashMap<>(); private static final String MOCK_ANNOTATION_DELIMITER = "#"; private static final String MOCK_FN_DELIMITER = "~"; private Unifier unifier; private BirScope currentScope; public static BIRGen getInstance(CompilerContext context) { BIRGen birGen = context.get(BIR_GEN); if (birGen == null) { birGen = new BIRGen(context); } return birGen; } private BIRGen(CompilerContext context) { context.put(BIR_GEN, this); this.names = Names.getInstance(context); this.symTable = SymbolTable.getInstance(context); this.birOptimizer = BIROptimizer.getInstance(context); this.unifier = new Unifier(); this.types = Types.getInstance(context); } public BLangPackage genBIR(BLangPackage astPkg) { BIRPackage birPkg = new BIRPackage(astPkg.pos, astPkg.packageID.orgName, astPkg.packageID.pkgName, astPkg.packageID.name, astPkg.packageID.version, astPkg.packageID.sourceFileName); astPkg.symbol.bir = birPkg; this.env = new BIRGenEnv(birPkg); astPkg.accept(this); this.birOptimizer.optimizePackage(birPkg); if (!astPkg.moduleContextDataHolder.skipTests() && astPkg.hasTestablePackage()) { BIRPackage testBirPkg = new BIRPackage(astPkg.pos, astPkg.packageID.orgName, astPkg.packageID.pkgName, astPkg.packageID.name, astPkg.packageID.version, astPkg.packageID.sourceFileName); this.env = new BIRGenEnv(testBirPkg); astPkg.accept(this); astPkg.getTestablePkgs().forEach(testPkg -> { visitBuiltinFunctions(testPkg, testPkg.initFunction); visitBuiltinFunctions(testPkg, testPkg.startFunction); visitBuiltinFunctions(testPkg, testPkg.stopFunction); for (BLangImportPackage mod : astPkg.imports) { testPkg.imports.remove(mod); } testPkg.accept(this); this.birOptimizer.optimizePackage(testBirPkg); testPkg.symbol.bir = testBirPkg; Map<String, String> mockFunctionMap = astPkg.getTestablePkg().getMockFunctionNamesMap(); if (!mockFunctionMap.isEmpty()) { replaceMockedFunctions(testBirPkg, mockFunctionMap, astPkg.packageID); } }); } setEntryPoints(astPkg); return astPkg; } private void setEntryPoints(BLangPackage pkgNode) { BLangFunction mainFunc = getMainFunction(pkgNode); if (mainFunc != null || listenerDeclarationFound(pkgNode.getGlobalVariables()) || !pkgNode.services.isEmpty()) { pkgNode.symbol.entryPointExists = true; } } private boolean listenerDeclarationFound(List<BLangVariable> globalVars) { for (BLangVariable globalVar : globalVars) { if (Symbols.isFlagOn(globalVar.symbol.flags, Flags.LISTENER)) { return true; } } return false; } private BLangFunction getMainFunction(BLangPackage pkgNode) { for (BLangFunction funcNode : pkgNode.functions) { if (CompilerUtils.isMainFunction(funcNode)) { return funcNode; } } return null; } private void visitBuiltinFunctions(BLangPackage pkgNode, BLangFunction function) {
if (Symbols.isFlagOn(pkgNode.symbol.flags, Flags.TESTABLE)) { String funcName = function.getName().value; String builtinFuncName = funcName.substring(funcName.indexOf("<") + 1, funcName.indexOf(">")); String modifiedFuncName = funcName.replace(builtinFuncName, "test" + builtinFuncName); function.name.setValue(modifiedFuncName); Name functionName = names.fromString(modifiedFuncName); function.originalFuncSymbol.name = functionName; function.symbol.name = functionName; } } private void replaceMockedFunctions(BIRPackage birPkg, Map<String, String> mockFunctionMap, PackageID packageID) { replaceFunctions(birPkg.functions, mockFunctionMap, packageID); if (birPkg.typeDefs.size() != 0) { for (BIRTypeDefinition typeDef : birPkg.typeDefs) { if (typeDef.type instanceof BObjectType) { replaceFunctions(typeDef.attachedFuncs, mockFunctionMap, packageID); } } } } private void replaceFunctions(List<BIRFunction> functionList, Map<String, String> mockFunctionMap, PackageID packageID) { for (BIRFunction function : functionList) { List<BIRBasicBlock> basicBlocks = function.basicBlocks; for (BIRBasicBlock basicBlock : basicBlocks) { BIRTerminator bbTerminator = basicBlock.terminator; if (bbTerminator.kind.equals(InstructionKind.CALL)) { BIRTerminator.Call callTerminator = (BIRTerminator.Call) bbTerminator; String functionKey = callTerminator.calleePkg.toString() + MOCK_ANNOTATION_DELIMITER + callTerminator.name.toString(); String legacyKey = callTerminator.calleePkg.toString() + MOCK_FN_DELIMITER + callTerminator.name.toString(); if (mockFunctionMap.containsKey(functionKey)) { String desugarFunction = "$MOCK_" + callTerminator.name.getValue(); callTerminator.name = new Name(desugarFunction); callTerminator.calleePkg = packageID; } else if (mockFunctionMap.get(legacyKey) != null) { String mockfunctionName = mockFunctionMap.get(legacyKey); callTerminator.name = new Name(mockfunctionName); callTerminator.calleePkg = packageID; } } } } } @Override public void visit(BLangPackage astPkg) { astPkg.imports.forEach(impPkg -> impPkg.accept(this)); astPkg.constants.forEach(astConst -> astConst.accept(this)); astPkg.typeDefinitions.forEach(astTypeDef -> astTypeDef.accept(this)); generateClassDefinitions(astPkg.topLevelNodes); astPkg.globalVars.forEach(astGlobalVar -> astGlobalVar.accept(this)); astPkg.initFunction.accept(this); astPkg.startFunction.accept(this); astPkg.stopFunction.accept(this); astPkg.functions.forEach(astFunc -> astFunc.accept(this)); astPkg.annotations.forEach(astAnn -> astAnn.accept(this)); astPkg.services.forEach(service -> service.accept(this)); } private void generateClassDefinitions(List<TopLevelNode> topLevelNodes) { for (TopLevelNode topLevelNode : topLevelNodes) { if (topLevelNode.getKind() == CLASS_DEFN) { ((BLangClassDefinition) topLevelNode).accept(this); } } } @Override private BType getDefinedType(BLangTypeDefinition astTypeDefinition) { BType nodeType = astTypeDefinition.typeNode.getBType(); if (types.getReferredType(nodeType).tag == TypeTags.ERROR) { return astTypeDefinition.symbol.type; } return nodeType; } @Override public void visit(BLangClassDefinition classDefinition) { BIRTypeDefinition typeDef = new BIRTypeDefinition(classDefinition.pos, classDefinition.symbol.name, classDefinition.symbol.originalName, classDefinition.symbol.flags, false, classDefinition.getBType(), new ArrayList<>(), classDefinition.symbol.origin.toBIROrigin()); typeDefs.put(classDefinition.symbol, typeDef); this.env.enclPkg.typeDefs.add(typeDef); typeDef.index = this.env.enclPkg.typeDefs.size() - 1; 
typeDef.setMarkdownDocAttachment(classDefinition.symbol.markdownDocumentation); for (BLangType typeRef : classDefinition.typeRefs) { typeDef.referencedTypes.add(typeRef.getBType()); } populateBIRAnnotAttachments(classDefinition.annAttachments, typeDef.annotAttachments, this.env); for (BAttachedFunction func : ((BObjectTypeSymbol) classDefinition.symbol).referencedFunctions) { BInvokableSymbol funcSymbol = func.symbol; BIRFunction birFunc = new BIRFunction(classDefinition.pos, func.funcName, funcSymbol.flags, func.type, names.fromString(DEFAULT_WORKER_NAME), 0, funcSymbol.origin.toBIROrigin()); if (funcSymbol.receiverSymbol != null) { birFunc.receiver = getSelf(funcSymbol.receiverSymbol); } birFunc.setMarkdownDocAttachment(funcSymbol.markdownDocumentation); int defaultableParamsCount = 0; birFunc.argsCount = funcSymbol.params.size() + defaultableParamsCount + (funcSymbol.restParam != null ? 1 : 0); funcSymbol.params.forEach(requiredParam -> addParam(birFunc, requiredParam, classDefinition.pos)); if (funcSymbol.restParam != null) { addRestParam(birFunc, funcSymbol.restParam, classDefinition.pos); } birFunc.returnVariable = new BIRVariableDcl(classDefinition.pos, funcSymbol.retType, this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.RETURN, null); birFunc.localVars.add(0, birFunc.returnVariable); typeDef.attachedFuncs.add(birFunc); } } @Override public void visit(BLangService serviceNode) { BServiceSymbol symbol = (BServiceSymbol) serviceNode.symbol; List<String> attachPoint = symbol.getAbsResourcePath().orElse(null); String attachPointLiteral = symbol.getAttachPointStringLiteral().orElse(null); BIRNode.BIRServiceDeclaration serviceDecl = new BIRNode.BIRServiceDeclaration(attachPoint, attachPointLiteral, symbol.getListenerTypes(), symbol.name, symbol.getAssociatedClassSymbol().name, symbol.type, symbol.origin, symbol.flags, symbol.pos); serviceDecl.setMarkdownDocAttachment(symbol.markdownDocumentation); this.env.enclPkg.serviceDecls.add(serviceDecl); } @Override public void visit(BLangConstant astConstant) { BConstantSymbol constantSymbol = astConstant.symbol; Name constName = constantSymbol.name; Name constOriginalName = constantSymbol.getOriginalName(); BType type = constantSymbol.type; ConstValue constantValue = getBIRConstantVal(constantSymbol.value); BIRConstant birConstant = new BIRConstant(astConstant.pos, constName, constOriginalName, constantSymbol.flags, type, constantValue, constantSymbol.origin.toBIROrigin()); birConstant.constValue = constantValue; birConstant.setMarkdownDocAttachment(astConstant.symbol.markdownDocumentation); this.env.enclPkg.constants.add(birConstant); } private ConstValue getBIRConstantVal(BLangConstantValue constValue) { if (constValue.type.tag == TypeTags.MAP) { Map<String, ConstValue> mapConstVal = new HashMap<>(); ((Map<String, BLangConstantValue>) constValue.value) .forEach((key, value) -> mapConstVal.put(key, getBIRConstantVal(value))); return new ConstValue(mapConstVal, constValue.type); } return new ConstValue(constValue.value, constValue.type); } @Override public void visit(BLangImportPackage impPkg) { this.env.enclPkg.importModules.add(new BIRNode.BIRImportModule(impPkg.pos, impPkg.symbol.pkgID.orgName, impPkg.symbol.pkgID.name, impPkg.symbol.pkgID.version)); } @Override public void visit(BLangResourceFunction resourceFunction) { visit((BLangFunction) resourceFunction); } @Override public void visit(BLangFunction astFunc) { BInvokableType type = astFunc.symbol.getType(); boolean isTypeAttachedFunction = astFunc.flagSet.contains(Flag.ATTACHED) 
&& !typeDefs.containsKey(astFunc.receiver.getBType().tsymbol); Name workerName = names.fromIdNode(astFunc.defaultWorkerName); this.env.unlockVars.push(new BIRLockDetailsHolder()); BIRFunction birFunc; if (isTypeAttachedFunction) { Name funcName = names.fromString(astFunc.symbol.name.value); birFunc = new BIRFunction(astFunc.pos, funcName, names.fromString(astFunc.symbol.getOriginalName().value), astFunc.symbol.flags, type, workerName, astFunc.sendsToThis.size(), astFunc.symbol.origin.toBIROrigin()); } else { Name funcName = getFuncName(astFunc.symbol); birFunc = new BIRFunction(astFunc.pos, funcName, names.fromString(astFunc.symbol.getOriginalName().value), astFunc.symbol.flags, type, workerName, astFunc.sendsToThis.size(), astFunc.symbol.origin.toBIROrigin()); } this.currentScope = new BirScope(0, null); if (astFunc.receiver != null) { BIRFunctionParameter birVarDcl = new BIRFunctionParameter(astFunc.pos, astFunc.receiver.getBType(), this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.ARG, astFunc.receiver.name.value, false); this.env.symbolVarMap.put(astFunc.receiver.symbol, birVarDcl); } if (astFunc.receiver != null) { birFunc.receiver = getSelf(astFunc.receiver.symbol); } birFunc.setMarkdownDocAttachment(astFunc.symbol.markdownDocumentation); int i = 0; for (String channelName : astFunc.sendsToThis) { birFunc.workerChannels[i] = new BIRNode.ChannelDetails(channelName, astFunc.defaultWorkerName.value .equals(DEFAULT_WORKER_NAME), isWorkerSend(channelName, astFunc.defaultWorkerName.value)); i++; } if (astFunc.hasBody() && astFunc.body.getKind() == NodeKind.EXTERN_FUNCTION_BODY) { populateBIRAnnotAttachments(((BLangExternalFunctionBody) astFunc.body).annAttachments, birFunc.annotAttachments, this.env); } populateBIRAnnotAttachments(astFunc.annAttachments, birFunc.annotAttachments, this.env); populateBIRAnnotAttachments(astFunc.returnTypeAnnAttachments, birFunc.returnTypeAnnots, this.env); birFunc.argsCount = astFunc.requiredParams.size() + (astFunc.restParam != null ? 
1 : 0) + astFunc.paramClosureMap.size(); if (astFunc.flagSet.contains(Flag.ATTACHED) && typeDefs.containsKey(astFunc.receiver.getBType().tsymbol)) { typeDefs.get(astFunc.receiver.getBType().tsymbol).attachedFuncs.add(birFunc); } else { this.env.enclPkg.functions.add(birFunc); } this.env.enclFunc = birFunc; BType retType = unifier.build(astFunc.symbol.type.getReturnType()); birFunc.returnVariable = new BIRVariableDcl(astFunc.pos, retType, this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.RETURN, null); birFunc.localVars.add(0, birFunc.returnVariable); astFunc.paramClosureMap.forEach((k, v) -> addRequiredParam(birFunc, v, astFunc.pos)); astFunc.requiredParams.forEach(requiredParam -> addParam(birFunc, requiredParam)); if (astFunc.restParam != null) { addRestParam(birFunc, astFunc.restParam.symbol, astFunc.restParam.pos); } if (astFunc.interfaceFunction || Symbols.isNative(astFunc.symbol)) { this.env.clear(); return; } BIRBasicBlock entryBB = new BIRBasicBlock(this.env.nextBBId(names)); this.env.enclBasicBlocks = birFunc.basicBlocks; birFunc.basicBlocks.add(entryBB); this.env.enclBB = entryBB; addToTrapStack(entryBB); astFunc.body.accept(this); birFunc.basicBlocks.add(this.env.returnBB); BIRBasicBlock enclBB = this.env.enclBB; if (enclBB.instructions.size() == 0 && enclBB.terminator == null && this.env.returnBB != null) { enclBB.terminator = new BIRTerminator.GOTO(null, this.env.returnBB, this.currentScope); } this.env.clear(); this.env.unlockVars.clear(); birFunc.parameters.values().forEach(basicBlocks -> basicBlocks.forEach(bb -> bb.id = this.env.nextBBId(names))); birFunc.basicBlocks.forEach(bb -> bb.id = this.env.nextBBId(names)); birFunc.errorTable.sort(Comparator.comparingInt(o -> Integer.parseInt(o.trapBB.id.value.replace("bb", "")))); birFunc.dependentGlobalVars = astFunc.symbol.dependentGlobalVars.stream() .map(varSymbol -> this.globalVarMap.get(varSymbol)).collect(Collectors.toSet()); this.env.clear(); } private BIRVariableDcl getSelf(BSymbol receiver) { BIRVariableDcl self = this.env.symbolVarMap.get(receiver); if (self == null) { return new BIRVariableDcl(null, receiver.type, receiver.name, VarScope.FUNCTION, VarKind.SELF, null); } self.kind = VarKind.SELF; self.name = new Name("%self"); return self; } @Override public void visit(BLangBlockFunctionBody astBody) { BIRBasicBlock endLoopEndBB = this.env.enclLoopEndBB; BlockNode prevBlock = this.currentBlock; this.currentBlock = astBody; this.varDclsByBlock.computeIfAbsent(astBody, k -> new ArrayList<>()); for (BLangStatement astStmt : astBody.stmts) { astStmt.accept(this); } List<BIRVariableDcl> varDecls = this.varDclsByBlock.get(astBody); for (BIRVariableDcl birVariableDcl : varDecls) { birVariableDcl.endBB = this.env.enclBasicBlocks.get(this.env.enclBasicBlocks.size() - 1); } this.env.enclLoopEndBB = endLoopEndBB; this.currentBlock = prevBlock; } private BIRBasicBlock beginBreakableBlock(Location pos, BLangBlockStmt.FailureBreakMode mode) { BIRBasicBlock blockBB = new BIRBasicBlock(this.env.nextBBId(names)); addToTrapStack(blockBB); this.env.enclBasicBlocks.add(blockBB); this.env.enclBB.terminator = new BIRTerminator.GOTO(pos, blockBB, this.currentScope); BIRBasicBlock blockEndBB = new BIRBasicBlock(this.env.nextBBId(names)); addToTrapStack(blockEndBB); blockBB.terminator = new BIRTerminator.GOTO(pos, blockEndBB, this.currentScope); this.env.enclBB = blockBB; if (mode == BLangBlockStmt.FailureBreakMode.BREAK_WITHIN_BLOCK) { this.env.enclInnerOnFailEndBB = blockEndBB; } else { this.env.enclOnFailEndBB = blockEndBB; } 
this.env.unlockVars.push(new BIRLockDetailsHolder()); return blockEndBB; } private void endBreakableBlock(BIRBasicBlock blockEndBB) { this.env.unlockVars.pop(); if (this.env.enclBB.terminator == null) { this.env.enclBB.terminator = new BIRTerminator.GOTO(null, blockEndBB, this.currentScope); } this.env.enclBasicBlocks.add(blockEndBB); this.env.enclBB = blockEndBB; } @Override public void visit(BLangAnnotationAttachment astAnnotAttach) { BIRAnnotationValue annotationValue; if (astAnnotAttach.expr == null) { annotationValue = new BIRNode.BIRAnnotationLiteralValue(symTable.booleanType, true); } else { if (!isCompileTimeAnnotationValue(astAnnotAttach.expr)) { return; } annotationValue = createAnnotationValue(astAnnotAttach.expr); } Name annotTagRef = this.names.fromIdNode(astAnnotAttach.annotationName); BIRAnnotationAttachment annotAttachment = new BIRAnnotationAttachment(astAnnotAttach.pos, annotTagRef); annotAttachment.packageID = astAnnotAttach.annotationSymbol.pkgID; annotAttachment.annotValues.add(annotationValue); this.env.enclAnnotAttachments.add(annotAttachment); } private boolean isCompileTimeAnnotationValue(BLangExpression expression) { BLangExpression expr = unwrapAnnotationExpressionFromCloneReadOnly(expression); switch (expr.getKind()) { case LITERAL: case NUMERIC_LITERAL: return true; case RECORD_LITERAL_EXPR: BLangRecordLiteral recordLiteral = (BLangRecordLiteral) expr; for (RecordLiteralNode.RecordField field : recordLiteral.fields) { if (!isCompileTimeAnnotationValue(((BLangRecordKeyValueField) field).valueExpr)) { return false; } } return true; case ARRAY_LITERAL_EXPR: BLangArrayLiteral arrayLiteral = (BLangArrayLiteral) expr; for (BLangExpression bLangExpr : arrayLiteral.exprs) { if (!isCompileTimeAnnotationValue(bLangExpr)) { return false; } } return true; case TYPE_CONVERSION_EXPR: return isCompileTimeAnnotationValue(((BLangTypeConversionExpr) expr).expr); case STATEMENT_EXPRESSION: BLangStatementExpression stmtExpr = (BLangStatementExpression) expr; List<BLangStatement> stmts = ((BLangBlockStmt) stmtExpr.stmt).stmts; if (!((BLangLocalVarRef) stmtExpr.expr).varSymbol.name.value.startsWith(DESUGARED_MAPPING_CONSTR_KEY)) { return false; } for (int i = 1; i < stmts.size(); i++) { BLangAssignment assignmentStmt = (BLangAssignment) stmts.get(i); if (!isCompileTimeAnnotationValue(((BLangIndexBasedAccess) assignmentStmt.varRef).indexExpr) || !isCompileTimeAnnotationValue(assignmentStmt.expr)) { return false; } } return true; default: return false; } } private BLangExpression unwrapAnnotationExpressionFromCloneReadOnly(BLangExpression expr) { if (expr.getKind() == INVOCATION) { BLangInvocation invocation = (BLangInvocation) expr; if (invocation.name.getValue().equals(CLONE_READ_ONLY)) { return invocation.expr; } } return expr; } private BIRAnnotationValue createAnnotationValue(BLangExpression expression) { BLangExpression expr = unwrapAnnotationExpressionFromCloneReadOnly(expression); switch (expr.getKind()) { case LITERAL: case NUMERIC_LITERAL: return createAnnotationLiteralValue((BLangLiteral) expr); case RECORD_LITERAL_EXPR: return createAnnotationRecordValue((BLangRecordLiteral) expr); case ARRAY_LITERAL_EXPR: return createAnnotationArrayValue((BLangArrayLiteral) expr); case TYPE_CONVERSION_EXPR: return createAnnotationValue(((BLangTypeConversionExpr) expr).expr); case STATEMENT_EXPRESSION: return createAnnotationRecordValue((BLangStatementExpression) expr); default: throw new IllegalStateException("Invalid annotation value expression kind: " + expr.getKind()); } } private 
BIRNode.BIRAnnotationRecordValue createAnnotationRecordValue(BLangRecordLiteral recordLiteral) { Map<String, BIRAnnotationValue> annotValueEntryMap = new HashMap<>(); for (RecordLiteralNode.RecordField field : recordLiteral.fields) { BLangRecordKeyValueField keyValuePair = (BLangRecordKeyValueField) field; BLangLiteral keyLiteral = (BLangLiteral) keyValuePair.key.expr; String entryKey = (String) keyLiteral.value; BIRAnnotationValue annotationValue = createAnnotationValue(keyValuePair.valueExpr); annotValueEntryMap.put(entryKey, annotationValue); } return new BIRNode.BIRAnnotationRecordValue(recordLiteral.getBType(), annotValueEntryMap); } private BIRNode.BIRAnnotationRecordValue createAnnotationRecordValue(BLangStatementExpression stmtExpr) { Map<String, BIRAnnotationValue> annotValueEntryMap = new HashMap<>(); List<BLangStatement> stmts = ((BLangBlockStmt) stmtExpr.stmt).stmts; for (int i = 1; i < stmts.size(); i++) { BLangAssignment assignmentStmt = (BLangAssignment) stmts.get(i); annotValueEntryMap.put( (String) ((BLangLiteral) ((BLangIndexBasedAccess) assignmentStmt.varRef).indexExpr).value, createAnnotationValue(assignmentStmt.expr)); } return new BIRNode.BIRAnnotationRecordValue(stmtExpr.getBType(), annotValueEntryMap); } private BIRNode.BIRAnnotationArrayValue createAnnotationArrayValue(BLangArrayLiteral arrayLiteral) { BIRAnnotationValue[] annotValues = new BIRAnnotationValue[arrayLiteral.exprs.size()]; for (int exprIndex = 0; exprIndex < arrayLiteral.exprs.size(); exprIndex++) { annotValues[exprIndex] = createAnnotationValue(arrayLiteral.exprs.get(exprIndex)); } return new BIRNode.BIRAnnotationArrayValue(arrayLiteral.getBType(), annotValues); } private BIRNode.BIRAnnotationLiteralValue createAnnotationLiteralValue(BLangLiteral literalValue) { return new BIRNode.BIRAnnotationLiteralValue(literalValue.getBType(), literalValue.value); } @Override public void visit(BLangAnnotation astAnnotation) { BAnnotationSymbol annSymbol = (BAnnotationSymbol) astAnnotation.symbol; BIRAnnotation birAnn = new BIRAnnotation(astAnnotation.pos, annSymbol.name, annSymbol.originalName, annSymbol.flags, annSymbol.points, annSymbol.attachedType == null ? 
symTable.trueType : annSymbol.attachedType, annSymbol.origin.toBIROrigin()); birAnn.setMarkdownDocAttachment(annSymbol.markdownDocumentation); this.env.enclPkg.annotations.add(birAnn); } private boolean isWorkerSend(String chnlName, String workerName) { return chnlName.split("->")[0].equals(workerName); } @Override public void visit(BLangLambdaFunction lambdaExpr) { BIRVariableDcl tempVarLambda = new BIRVariableDcl(lambdaExpr.pos, lambdaExpr.getBType(), this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP, null); this.env.enclFunc.localVars.add(tempVarLambda); BIROperand lhsOp = new BIROperand(tempVarLambda); Name funcName = getFuncName(lambdaExpr.function.symbol); List<BIRVariableDcl> params = new ArrayList<>(); lambdaExpr.function.requiredParams.forEach(param -> { BIRVariableDcl birVarDcl = new BIRVariableDcl(param.pos, param.symbol.type, this.env.nextLambdaVarId(names), VarScope.FUNCTION, VarKind.ARG, param.name.value); params.add(birVarDcl); }); BLangSimpleVariable restParam = lambdaExpr.function.restParam; if (restParam != null) { BIRVariableDcl birVarDcl = new BIRVariableDcl(restParam.pos, restParam.symbol.type, this.env.nextLambdaVarId(names), VarScope.FUNCTION, VarKind.ARG, null); params.add(birVarDcl); } setScopeAndEmit( new BIRNonTerminator.FPLoad(lambdaExpr.pos, lambdaExpr.function.symbol.pkgID, funcName, lhsOp, params, getClosureMapOperands(lambdaExpr), lambdaExpr.getBType(), lambdaExpr.function.symbol.strandName, lambdaExpr.function.symbol.schedulerPolicy)); this.env.targetOperand = lhsOp; } private List<BIROperand> getClosureMapOperands(BLangLambdaFunction lambdaExpr) { List<BIROperand> closureMaps = new ArrayList<>(); lambdaExpr.function.paramClosureMap.forEach((k, v) -> { BVarSymbol symbol = lambdaExpr.enclMapSymbols.get(k); if (symbol == null) { symbol = lambdaExpr.paramMapSymbolsOfEnclInvokable.get(k); } BIROperand varRef = new BIROperand(this.env.symbolVarMap.get(symbol)); closureMaps.add(varRef); }); return closureMaps; } private Name getFuncName(BInvokableSymbol symbol) { if (symbol.receiverSymbol == null) { return names.fromString(symbol.name.value); } int offset = symbol.receiverSymbol.type.tsymbol.name.value.length() + 1; String attachedFuncName = symbol.name.value; return names.fromString(attachedFuncName.substring(offset)); } private void addParam(BIRFunction birFunc, BLangVariable functionParam) { addParam(birFunc, functionParam.symbol, functionParam.expr, functionParam.pos); } private void addParam(BIRFunction birFunc, BVarSymbol paramSymbol, Location pos) { addParam(birFunc, paramSymbol, null, pos); } private void addParam(BIRFunction birFunc, BVarSymbol paramSymbol, BLangExpression defaultValExpr, Location pos) { BIRFunctionParameter birVarDcl = new BIRFunctionParameter(pos, paramSymbol.type, this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.ARG, paramSymbol.name.value, defaultValExpr != null); birFunc.localVars.add(birVarDcl); List<BIRBasicBlock> bbsOfDefaultValueExpr = new ArrayList<>(); if (defaultValExpr != null) { BIRBasicBlock defaultExprBB = new BIRBasicBlock(this.env.nextBBId(names)); bbsOfDefaultValueExpr.add(defaultExprBB); this.env.enclBB = defaultExprBB; this.env.enclBasicBlocks = bbsOfDefaultValueExpr; defaultValExpr.accept(this); BIROperand varRef = new BIROperand(birVarDcl); setScopeAndEmit(new Move(birFunc.pos, this.env.targetOperand, varRef)); this.env.enclBB.terminator = new BIRTerminator.Return(birFunc.pos); } BIRParameter parameter = new BIRParameter(pos, paramSymbol.name, paramSymbol.flags); 
birFunc.requiredParams.add(parameter); birFunc.parameters.put(birVarDcl, bbsOfDefaultValueExpr); this.env.symbolVarMap.put(paramSymbol, birVarDcl); } private void addRestParam(BIRFunction birFunc, BVarSymbol paramSymbol, Location pos) { BIRFunctionParameter birVarDcl = new BIRFunctionParameter(pos, paramSymbol.type, this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.ARG, paramSymbol.name.value, false); birFunc.parameters.put(birVarDcl, new ArrayList<>()); birFunc.localVars.add(birVarDcl); birFunc.restParam = new BIRParameter(pos, paramSymbol.name, paramSymbol.flags); this.env.symbolVarMap.put(paramSymbol, birVarDcl); } private void addRequiredParam(BIRFunction birFunc, BVarSymbol paramSymbol, Location pos) { BIRFunctionParameter birVarDcl = new BIRFunctionParameter(pos, paramSymbol.type, this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.ARG, paramSymbol.name.value, false); birFunc.parameters.put(birVarDcl, new ArrayList<>()); birFunc.localVars.add(birVarDcl); BIRParameter parameter = new BIRParameter(pos, paramSymbol.name, paramSymbol.flags); birFunc.requiredParams.add(parameter); this.env.symbolVarMap.put(paramSymbol, birVarDcl); } @Override public void visit(BLangBlockStmt astBlockStmt) { BIRBasicBlock blockEndBB = null; BIRBasicBlock currentOnFailEndBB = this.env.enclOnFailEndBB; BIRBasicBlock currentWithinOnFailEndBB = this.env.enclInnerOnFailEndBB; BlockNode prevBlock = this.currentBlock; this.currentBlock = astBlockStmt; this.varDclsByBlock.computeIfAbsent(astBlockStmt, k -> new ArrayList<>()); if (astBlockStmt.failureBreakMode != BLangBlockStmt.FailureBreakMode.NOT_BREAKABLE) { blockEndBB = beginBreakableBlock(astBlockStmt.pos, astBlockStmt.failureBreakMode); } for (BLangStatement astStmt : astBlockStmt.stmts) { astStmt.accept(this); } if (astBlockStmt.failureBreakMode != BLangBlockStmt.FailureBreakMode.NOT_BREAKABLE) { endBreakableBlock(blockEndBB); } this.varDclsByBlock.get(astBlockStmt).forEach(birVariableDcl -> birVariableDcl.endBB = this.env.enclBasicBlocks.get(this.env.enclBasicBlocks.size() - 1) ); this.env.enclInnerOnFailEndBB = currentWithinOnFailEndBB; this.env.enclOnFailEndBB = currentOnFailEndBB; this.currentBlock = prevBlock; } @Override public void visit(BLangFail failNode) { if (failNode.expr == null) { if (this.env.enclInnerOnFailEndBB != null) { this.env.enclBB.terminator = new BIRTerminator.GOTO(failNode.pos, this.env.enclInnerOnFailEndBB, this.currentScope); } return; } BIRLockDetailsHolder toUnlock = this.env.unlockVars.peek(); if (!toUnlock.isEmpty()) { BIRBasicBlock goToBB = new BIRBasicBlock(this.env.nextBBId(names)); this.env.enclBasicBlocks.add(goToBB); this.env.enclBB.terminator = new BIRTerminator.GOTO(failNode.pos, goToBB, this.currentScope); this.env.enclBB = goToBB; } int numLocks = toUnlock.size(); while (numLocks > 0) { BIRBasicBlock unlockBB = new BIRBasicBlock(this.env.nextBBId(names)); this.env.enclBasicBlocks.add(unlockBB); BIRTerminator.Unlock unlock = new BIRTerminator.Unlock(null, unlockBB, this.currentScope); this.env.enclBB.terminator = unlock; unlock.relatedLock = toUnlock.getLock(numLocks - 1); this.env.enclBB = unlockBB; numLocks--; } BIRBasicBlock onFailBB = new BIRBasicBlock(this.env.nextBBId(names)); addToTrapStack(onFailBB); this.env.enclBasicBlocks.add(onFailBB); this.env.enclBB.terminator = new BIRTerminator.GOTO(failNode.pos, onFailBB, this.currentScope); this.env.enclBB = onFailBB; failNode.exprStmt.accept(this); if (this.env.enclBB.terminator == null) { this.env.enclBB.terminator = new 
BIRTerminator.GOTO(failNode.pos, this.env.enclOnFailEndBB, this.currentScope); } BIRBasicBlock ignoreBlock = new BIRBasicBlock(this.env.nextBBId(names)); addToTrapStack(ignoreBlock); ignoreBlock.terminator = new BIRTerminator.GOTO(failNode.pos, this.env.enclOnFailEndBB, this.currentScope); this.env.enclBasicBlocks.add(ignoreBlock); this.env.enclBB = ignoreBlock; } @Override public void visit(BLangSimpleVariableDef astVarDefStmt) { VarKind kind; if (astVarDefStmt.var.symbol.origin == SymbolOrigin.VIRTUAL) { kind = VarKind.SYNTHETIC; } else { kind = VarKind.LOCAL; } BIRVariableDcl birVarDcl = new BIRVariableDcl(astVarDefStmt.pos, astVarDefStmt.var.symbol.type, this.env.nextLocalVarId(names), VarScope.FUNCTION, kind, astVarDefStmt.var.name.value); birVarDcl.startBB = this.env.enclBB; this.varDclsByBlock.get(this.currentBlock).add(birVarDcl); this.env.enclFunc.localVars.add(birVarDcl); this.env.symbolVarMap.put(astVarDefStmt.var.symbol, birVarDcl); BirScope newScope = new BirScope(this.currentScope.id + 1, this.currentScope); birVarDcl.insScope = newScope; this.currentScope = newScope; if (astVarDefStmt.var.expr == null) { return; } astVarDefStmt.var.expr.accept(this); BIROperand varRef = new BIROperand(birVarDcl); setScopeAndEmit(new Move(astVarDefStmt.pos, this.env.targetOperand, varRef)); birVarDcl.insOffset = this.env.enclBB.instructions.size() - 1; } @Override public void visit(BLangSimpleVariable varNode) { String name = ANNOTATION_DATA.equals(varNode.symbol.name.value) ? ANNOTATION_DATA : varNode.name.value; String originalName = ANNOTATION_DATA.equals(varNode.symbol.getOriginalName().value) ? ANNOTATION_DATA : varNode.name.originalValue; BIRGlobalVariableDcl birVarDcl = new BIRGlobalVariableDcl(varNode.pos, varNode.symbol.flags, varNode.symbol.type, varNode.symbol.pkgID, names.fromString(name), names.fromString(originalName), VarScope.GLOBAL, VarKind.GLOBAL, varNode.name.value, varNode.symbol.origin.toBIROrigin()); birVarDcl.setMarkdownDocAttachment(varNode.symbol.markdownDocumentation); this.env.enclPkg.globalVars.add(birVarDcl); this.globalVarMap.put(varNode.symbol, birVarDcl); env.enclPkg.isListenerAvailable |= Symbols.isFlagOn(varNode.symbol.flags, Flags.LISTENER); } @Override public void visit(BLangAssignment astAssignStmt) { astAssignStmt.expr.accept(this); this.varAssignment = true; astAssignStmt.varRef.accept(this); this.varAssignment = false; } @Override public void visit(BLangExpressionStmt exprStmtNode) { exprStmtNode.expr.accept(this); if (this.env.returnBB == null && exprStmtNode.expr.getKind() == NodeKind.INVOCATION && types.isNeverTypeOrStructureTypeWithARequiredNeverMember(exprStmtNode.expr.getBType())) { BIRBasicBlock returnBB = new BIRBasicBlock(this.env.nextBBId(names)); returnBB.terminator = new BIRTerminator.Return(exprStmtNode.pos); this.env.returnBB = returnBB; } } @Override public void visit(BLangInvocation invocationExpr) { createCall(invocationExpr, false); } @Override public void visit(BLangInvocation.BLangActionInvocation actionInvocation) { createCall(actionInvocation, false); } @Override public void visit(BLangStatementExpression statementExpression) { statementExpression.stmt.accept(this); statementExpression.expr.accept(this); } @Override public void visit(BLangInvocation.BLangAttachedFunctionInvocation invocationExpr) { createCall(invocationExpr, true); } @Override public void visit(BLangInvocation.BFunctionPointerInvocation invocation) { invocation.functionPointerInvocation = true; createCall(invocation, false); } @Override public void 
visit(BLangForkJoin forkJoin) { forkJoin.workers.forEach(worker -> worker.accept(this)); } @Override public void visit(BLangWorkerReceive workerReceive) { BIRBasicBlock thenBB = new BIRBasicBlock(this.env.nextBBId(names)); addToTrapStack(thenBB); String channel = workerReceive.workerIdentifier.value + "->" + env.enclFunc.workerName.value; BIRVariableDcl tempVarDcl = new BIRVariableDcl(workerReceive.getBType(), this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand lhsOp = new BIROperand(tempVarDcl); this.env.targetOperand = lhsOp; boolean isOnSameStrand = DEFAULT_WORKER_NAME.equals(this.env.enclFunc.workerName.value); this.env.enclBB.terminator = new BIRTerminator.WorkerReceive(workerReceive.pos, names.fromString(channel), lhsOp, isOnSameStrand, thenBB, this.currentScope); this.env.enclBasicBlocks.add(thenBB); this.env.enclBB = thenBB; } @Override public void visit(BLangWorkerSend workerSend) { BIRBasicBlock thenBB = new BIRBasicBlock(this.env.nextBBId(names)); addToTrapStack(thenBB); this.env.enclBasicBlocks.add(thenBB); workerSend.expr.accept(this); String channelName = this.env.enclFunc.workerName.value + "->" + workerSend.workerIdentifier.value; boolean isOnSameStrand = DEFAULT_WORKER_NAME.equals(this.env.enclFunc.workerName.value); this.env.enclBB.terminator = new BIRTerminator.WorkerSend( workerSend.pos, names.fromString(channelName), this.env.targetOperand, isOnSameStrand, false, null, thenBB, this.currentScope); this.env.enclBB = thenBB; } @Override public void visit(BLangWorkerSyncSendExpr syncSend) { BIRBasicBlock thenBB = new BIRBasicBlock(this.env.nextBBId(names)); addToTrapStack(thenBB); syncSend.expr.accept(this); BIROperand dataOp = this.env.targetOperand; BIRVariableDcl tempVarDcl = new BIRVariableDcl(syncSend.receive.matchingSendsError, this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand lhsOp = new BIROperand(tempVarDcl); this.env.targetOperand = lhsOp; String channelName = this.env.enclFunc.workerName.value + "->" + syncSend.workerIdentifier.value; boolean isOnSameStrand = DEFAULT_WORKER_NAME.equals(this.env.enclFunc.workerName.value); this.env.enclBB.terminator = new BIRTerminator.WorkerSend( syncSend.pos, names.fromString(channelName), dataOp, isOnSameStrand, true, lhsOp, thenBB, this.currentScope); this.env.enclBasicBlocks.add(thenBB); this.env.enclBB = thenBB; } @Override public void visit(BLangWorkerFlushExpr flushExpr) { BIRBasicBlock thenBB = new BIRBasicBlock(this.env.nextBBId(names)); addToTrapStack(thenBB); BIRNode.ChannelDetails[] channels = new BIRNode.ChannelDetails[flushExpr.workerIdentifierList.size()]; int i = 0; for (BLangIdentifier workerIdentifier : flushExpr.workerIdentifierList) { String channelName = this.env.enclFunc.workerName.value + "->" + workerIdentifier.value; boolean isOnSameStrand = DEFAULT_WORKER_NAME.equals(this.env.enclFunc.workerName.value); channels[i] = new BIRNode.ChannelDetails(channelName, isOnSameStrand, true); i++; } BIRVariableDcl tempVarDcl = new BIRVariableDcl(flushExpr.getBType(), this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand lhsOp = new BIROperand(tempVarDcl); this.env.targetOperand = lhsOp; this.env.enclBB.terminator = new BIRTerminator.Flush(flushExpr.pos, channels, lhsOp, thenBB, this.currentScope); this.env.enclBasicBlocks.add(thenBB); this.env.enclBB = thenBB; } private void createWait(BLangWaitExpr waitExpr) { 
BIRBasicBlock thenBB = new BIRBasicBlock(this.env.nextBBId(names)); addToTrapStack(thenBB); List<BIROperand> exprList = new ArrayList<>(); waitExpr.exprList.forEach(expr -> { expr.accept(this); exprList.add(this.env.targetOperand); }); BIRVariableDcl tempVarDcl = new BIRVariableDcl(waitExpr.getBType(), this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand lhsOp = new BIROperand(tempVarDcl); this.env.targetOperand = lhsOp; this.env.enclBB.terminator = new BIRTerminator.Wait(waitExpr.pos, exprList, lhsOp, thenBB, this.currentScope); this.env.enclBasicBlocks.add(thenBB); this.env.enclBB = thenBB; } @Override public void visit(BLangErrorConstructorExpr errorConstructorExpr) { BIRVariableDcl tempVarError = new BIRVariableDcl(errorConstructorExpr.getBType(), this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarError); BIROperand lhsOp = new BIROperand(tempVarError); this.env.targetOperand = lhsOp; List<BLangExpression> positionalArgs = errorConstructorExpr.positionalArgs; positionalArgs.get(0).accept(this); BIROperand messageOp = this.env.targetOperand; positionalArgs.get(1).accept(this); BIROperand causeOp = this.env.targetOperand; errorConstructorExpr.errorDetail.accept(this); BIROperand detailsOp = this.env.targetOperand; BIRNonTerminator.NewError newError = new BIRNonTerminator.NewError(errorConstructorExpr.pos, errorConstructorExpr.getBType(), lhsOp, messageOp, causeOp, detailsOp); setScopeAndEmit(newError); this.env.targetOperand = lhsOp; } private void createCall(BLangInvocation invocationExpr, boolean isVirtual) { List<BLangExpression> requiredArgs = invocationExpr.requiredArgs; List<BLangExpression> restArgs = invocationExpr.restArgs; List<BIRArgument> args = new ArrayList<>(); boolean transactional = Symbols.isFlagOn(invocationExpr.symbol.flags, Flags.TRANSACTIONAL); for (BLangExpression requiredArg : requiredArgs) { if (requiredArg.getKind() == NodeKind.DYNAMIC_PARAM_EXPR) { ((BLangDynamicArgExpr) requiredArg).conditionalArgument.accept(this); BIROperand conditionalArg = this.env.targetOperand; ((BLangDynamicArgExpr) requiredArg).condition.accept(this); BIROperand condition = this.env.targetOperand; args.add(new BIRArgument(ArgumentState.CONDITIONALLY_PROVIDED, conditionalArg.variableDcl, condition)); } else if (requiredArg.getKind() != NodeKind.IGNORE_EXPR) { requiredArg.accept(this); args.add(new BIRArgument(ArgumentState.PROVIDED, this.env.targetOperand.variableDcl)); } else { BIRVariableDcl birVariableDcl = new BIRVariableDcl(requiredArg.getBType(), new Name("_"), VarScope.FUNCTION, VarKind.ARG); birVariableDcl.ignoreVariable = true; args.add(new BIRArgument(ArgumentState.NOT_PROVIDED, birVariableDcl)); } } for (BLangExpression arg : restArgs) { arg.accept(this); args.add(new BIRArgument(ArgumentState.PROVIDED, this.env.targetOperand.variableDcl)); } BIROperand fp = null; if (invocationExpr.functionPointerInvocation) { invocationExpr.expr.accept(this); fp = this.env.targetOperand; } BIRVariableDcl tempVarDcl = new BIRVariableDcl(invocationExpr.getBType(), this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand lhsOp = new BIROperand(tempVarDcl); this.env.targetOperand = lhsOp; BIRBasicBlock thenBB = new BIRBasicBlock(this.env.nextBBId(names)); addToTrapStack(thenBB); this.env.enclBasicBlocks.add(thenBB); if (invocationExpr.functionPointerInvocation) { this.env.enclBB.terminator = new 
BIRTerminator.FPCall(invocationExpr.pos, InstructionKind.FP_CALL, fp, args, lhsOp, invocationExpr.async, transactional, thenBB, this.currentScope); } else if (invocationExpr.async) { BInvokableSymbol bInvokableSymbol = (BInvokableSymbol) invocationExpr.symbol; List<BIRAnnotationAttachment> calleeAnnots = getStatementAnnotations(bInvokableSymbol.annAttachments, this.env); List<BIRAnnotationAttachment> annots = getStatementAnnotations(invocationExpr.annAttachments, this.env); this.env.enclBB.terminator = new BIRTerminator.AsyncCall(invocationExpr.pos, InstructionKind.ASYNC_CALL, isVirtual, invocationExpr.symbol.pkgID, getFuncName((BInvokableSymbol) invocationExpr.symbol), args, lhsOp, thenBB, annots, calleeAnnots, bInvokableSymbol.getFlags(), this.currentScope); } else { BInvokableSymbol bInvokableSymbol = (BInvokableSymbol) invocationExpr.symbol; List<BIRAnnotationAttachment> calleeAnnots = getStatementAnnotations(bInvokableSymbol.annAttachments, this.env); this.env.enclBB.terminator = new BIRTerminator.Call(invocationExpr.pos, InstructionKind.CALL, isVirtual, invocationExpr.symbol.pkgID, getFuncName((BInvokableSymbol) invocationExpr.symbol), args, lhsOp, thenBB, calleeAnnots, bInvokableSymbol.getFlags(), this.currentScope); } this.env.enclBB = thenBB; } @Override public void visit(BLangReturn astReturnStmt) { astReturnStmt.expr.accept(this); BIROperand retVarRef = new BIROperand(this.env.enclFunc.returnVariable); setScopeAndEmit(new Move(astReturnStmt.pos, this.env.targetOperand, retVarRef)); if (this.env.returnBB == null) { BIRBasicBlock returnBB = new BIRBasicBlock(this.env.nextBBId(names)); addToTrapStack(returnBB); returnBB.terminator = new BIRTerminator.Return(astReturnStmt.pos); this.env.returnBB = returnBB; } if (this.env.enclBB.terminator == null) { this.env.unlockVars.forEach(s -> { int i = s.size(); while (i > 0) { BIRBasicBlock unlockBB = new BIRBasicBlock(this.env.nextBBId(names)); this.env.enclBasicBlocks.add(unlockBB); BIRTerminator.Unlock unlock = new BIRTerminator.Unlock(null, unlockBB, this.currentScope); this.env.enclBB.terminator = unlock; unlock.relatedLock = s.getLock(i - 1); this.env.enclBB = unlockBB; i--; } }); this.env.enclBB.terminator = new BIRTerminator.GOTO(astReturnStmt.pos, this.env.returnBB, this.currentScope); BIRBasicBlock nextBB = new BIRBasicBlock(this.env.nextBBId(names)); this.env.enclBasicBlocks.add(nextBB); this.env.enclBB = nextBB; addToTrapStack(nextBB); } } @Override public void visit(BLangPanic panicNode) { panicNode.expr.accept(this); if (this.env.returnBB == null) { BIRBasicBlock returnBB = new BIRBasicBlock(this.env.nextBBId(names)); addToTrapStack(returnBB); returnBB.terminator = new BIRTerminator.Return(panicNode.pos); this.env.returnBB = returnBB; } this.env.enclBB.terminator = new BIRTerminator.Panic(panicNode.pos, this.env.targetOperand, this.currentScope); BIRBasicBlock unlockBB = new BIRBasicBlock(this.env.nextBBId(names)); addToTrapStack(unlockBB); this.env.enclBasicBlocks.add(unlockBB); this.env.enclBB = unlockBB; } @Override public void visit(BLangIf astIfStmt) { astIfStmt.expr.accept(this); BIROperand ifExprResult = this.env.targetOperand; BIRBasicBlock thenBB = new BIRBasicBlock(this.env.nextBBId(names)); addToTrapStack(thenBB); this.env.enclBasicBlocks.add(thenBB); BIRBasicBlock nextBB = new BIRBasicBlock(this.env.nextBBId(names)); BIRTerminator.Branch branchIns = new BIRTerminator.Branch(astIfStmt.pos, ifExprResult, thenBB, null, this.currentScope); this.env.enclBB.terminator = branchIns; this.env.enclBB = thenBB; 
astIfStmt.body.accept(this); if (this.env.enclBB.terminator == null) { this.env.enclBB.terminator = new BIRTerminator.GOTO(null, nextBB, this.currentScope); } if (astIfStmt.elseStmt != null) { BIRBasicBlock elseBB = new BIRBasicBlock(this.env.nextBBId(names)); addToTrapStack(elseBB); this.env.enclBasicBlocks.add(elseBB); branchIns.falseBB = elseBB; this.env.enclBB = elseBB; astIfStmt.elseStmt.accept(this); if (this.env.enclBB.terminator == null) { this.env.enclBB.terminator = new BIRTerminator.GOTO(null, nextBB, this.currentScope); } } else { branchIns.falseBB = nextBB; } addToTrapStack(nextBB); this.env.enclBasicBlocks.add(nextBB); this.env.enclBB = nextBB; } @Override public void visit(BLangWhile astWhileStmt) { BIRBasicBlock currentEnclLoopBB = this.env.enclLoopBB; BIRBasicBlock currentEnclLoopEndBB = this.env.enclLoopEndBB; BIRBasicBlock whileExprBB = new BIRBasicBlock(this.env.nextBBId(names)); addToTrapStack(whileExprBB); this.env.enclBasicBlocks.add(whileExprBB); this.env.enclBB.terminator = new BIRTerminator.GOTO(astWhileStmt.pos, whileExprBB, this.currentScope); this.env.enclBB = whileExprBB; astWhileStmt.expr.accept(this); BIROperand whileExprResult = this.env.targetOperand; BIRBasicBlock whileBodyBB = new BIRBasicBlock(this.env.nextBBId(names)); addToTrapStack(whileBodyBB); this.env.enclBasicBlocks.add(whileBodyBB); BIRBasicBlock whileEndBB = new BIRBasicBlock(this.env.nextBBId(names)); addToTrapStack(whileEndBB); this.env.enclBB.terminator = new BIRTerminator.Branch(astWhileStmt.pos, whileExprResult, whileBodyBB, whileEndBB, this.currentScope); this.env.enclBB = whileBodyBB; this.env.enclLoopBB = whileExprBB; this.env.enclLoopEndBB = whileEndBB; this.env.unlockVars.push(new BIRLockDetailsHolder()); astWhileStmt.body.accept(this); this.env.unlockVars.pop(); if (this.env.enclBB.terminator == null) { this.env.enclBB.terminator = new BIRTerminator.GOTO(null, whileExprBB, this.currentScope); } this.env.enclBasicBlocks.add(whileEndBB); this.env.enclBB = whileEndBB; this.env.enclLoopBB = currentEnclLoopBB; this.env.enclLoopEndBB = currentEnclLoopEndBB; } @Override public void visit(BLangIgnoreExpr ignoreExpr) { BIRVariableDcl tempVarDcl = new BIRVariableDcl(ignoreExpr.getBType(), this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); } @Override public void visit(BLangLiteral astLiteralExpr) { BIRVariableDcl tempVarDcl = new BIRVariableDcl(astLiteralExpr.getBType(), this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand toVarRef = new BIROperand(tempVarDcl); setScopeAndEmit(new BIRNonTerminator.ConstantLoad(astLiteralExpr.pos, astLiteralExpr.value, astLiteralExpr.getBType(), toVarRef)); this.env.targetOperand = toVarRef; } @Override public void visit(BLangMapLiteral astMapLiteralExpr) { visitTypedesc(astMapLiteralExpr.pos, astMapLiteralExpr.getBType(), Collections.emptyList()); BIRVariableDcl tempVarDcl = new BIRVariableDcl(astMapLiteralExpr.getBType(), this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand toVarRef = new BIROperand(tempVarDcl); setScopeAndEmit(new BIRNonTerminator.NewStructure(astMapLiteralExpr.pos, toVarRef, this.env.targetOperand, generateMappingConstructorEntries(astMapLiteralExpr.fields))); this.env.targetOperand = toVarRef; } @Override public void visit(BLangTypeConversionExpr astTypeConversionExpr) { BIRVariableDcl tempVarDcl = new 
BIRVariableDcl(astTypeConversionExpr.getBType(), this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand toVarRef = new BIROperand(tempVarDcl); astTypeConversionExpr.expr.accept(this); BIROperand rhsOp = this.env.targetOperand; setScopeAndEmit( new BIRNonTerminator.TypeCast(astTypeConversionExpr.pos, toVarRef, rhsOp, toVarRef.variableDcl.type, astTypeConversionExpr.checkTypes)); this.env.targetOperand = toVarRef; } @Override public void visit(BLangStructLiteral astStructLiteralExpr) { List<BIROperand> varDcls = mapToVarDcls(astStructLiteralExpr.enclMapSymbols); visitTypedesc(astStructLiteralExpr.pos, astStructLiteralExpr.getBType(), varDcls); BIRVariableDcl tempVarDcl = new BIRVariableDcl(astStructLiteralExpr.getBType(), this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand toVarRef = new BIROperand(tempVarDcl); BIRNonTerminator.NewStructure instruction = new BIRNonTerminator.NewStructure(astStructLiteralExpr.pos, toVarRef, this.env.targetOperand, generateMappingConstructorEntries(astStructLiteralExpr.fields)); setScopeAndEmit(instruction); this.env.targetOperand = toVarRef; } private List<BIROperand> mapToVarDcls(TreeMap<Integer, BVarSymbol> enclMapSymbols) { if (enclMapSymbols == null || enclMapSymbols.size() == 0) { return Collections.emptyList(); } ArrayList<BIROperand> varDcls = new ArrayList<>(enclMapSymbols.size()); for (BVarSymbol varSymbol : enclMapSymbols.values()) { BIRVariableDcl varDcl = this.env.symbolVarMap.get(varSymbol); varDcls.add(new BIROperand(varDcl)); } return varDcls; } @Override public void visit(BLangTypeInit connectorInitExpr) { BIRVariableDcl tempVarDcl = new BIRVariableDcl(connectorInitExpr.getBType(), this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand toVarRef = new BIROperand(tempVarDcl); BTypeSymbol objectTypeSymbol = getObjectTypeSymbol(connectorInitExpr.getBType()); BIRNonTerminator.NewInstance instruction; if (isInSamePackage(objectTypeSymbol, env.enclPkg.packageID)) { BIRTypeDefinition def = typeDefs.get(objectTypeSymbol); instruction = new BIRNonTerminator.NewInstance(connectorInitExpr.pos, def, toVarRef); } else { BType connectorInitExprType = types.getReferredType(connectorInitExpr.getBType()); BType objectType = connectorInitExprType.tag != TypeTags.UNION ? 
connectorInitExprType : ((BUnionType) connectorInitExprType).getMemberTypes().stream() .filter(bType -> bType.tag != TypeTags.ERROR) .findFirst() .get(); String objectName = objectType.tsymbol.name.value; instruction = new BIRNonTerminator.NewInstance(connectorInitExpr.pos, objectTypeSymbol.pkgID, objectName, toVarRef); } setScopeAndEmit(instruction); this.env.targetOperand = toVarRef; } private boolean isInSamePackage(BSymbol objectTypeSymbol, PackageID packageID) { return objectTypeSymbol.pkgID.equals(packageID); } @Override public void visit(BLangSimpleVarRef.BLangFieldVarRef fieldVarRef) { } @Override public void visit(BLangArrayLiteral astArrayLiteralExpr) { generateListConstructorExpr(astArrayLiteralExpr); } @Override public void visit(BLangTupleLiteral tupleLiteral) { generateListConstructorExpr(tupleLiteral); } @Override public void visit(BLangGroupExpr groupExpr) { groupExpr.expression.accept(this); } @Override public void visit(BLangJSONArrayLiteral jsonArrayLiteralExpr) { generateListConstructorExpr(jsonArrayLiteralExpr); } @Override public void visit(BLangMapAccessExpr astMapAccessExpr) { boolean variableStore = this.varAssignment; this.varAssignment = false; BIROperand rhsOp = this.env.targetOperand; astMapAccessExpr.expr.accept(this); BIROperand varRefRegIndex = this.env.targetOperand; astMapAccessExpr.indexExpr.accept(this); BIROperand keyRegIndex = this.env.targetOperand; if (variableStore) { setScopeAndEmit( new BIRNonTerminator.FieldAccess(astMapAccessExpr.pos, InstructionKind.MAP_STORE, varRefRegIndex, keyRegIndex, rhsOp, astMapAccessExpr.isStoreOnCreation)); return; } BIRVariableDcl tempVarDcl = new BIRVariableDcl(astMapAccessExpr.getBType(), this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand tempVarRef = new BIROperand(tempVarDcl); setScopeAndEmit(new BIRNonTerminator.FieldAccess(astMapAccessExpr.pos, InstructionKind.MAP_LOAD, tempVarRef, keyRegIndex, varRefRegIndex, astMapAccessExpr.optionalFieldAccess, astMapAccessExpr.isLValue && !astMapAccessExpr.leafNode)); this.env.targetOperand = tempVarRef; this.varAssignment = variableStore; } @Override public void visit(BLangTableAccessExpr astTableAccessExpr) { boolean variableStore = this.varAssignment; this.varAssignment = false; BIROperand rhsOp = this.env.targetOperand; astTableAccessExpr.expr.accept(this); BIROperand varRefRegIndex = this.env.targetOperand; astTableAccessExpr.indexExpr.accept(this); BIROperand keyRegIndex = this.env.targetOperand; if (variableStore) { setScopeAndEmit(new BIRNonTerminator.FieldAccess(astTableAccessExpr.pos, InstructionKind.TABLE_STORE, varRefRegIndex, keyRegIndex, rhsOp)); return; } BIRVariableDcl tempVarDcl = new BIRVariableDcl(astTableAccessExpr.getBType(), this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand tempVarRef = new BIROperand(tempVarDcl); setScopeAndEmit(new BIRNonTerminator.FieldAccess(astTableAccessExpr.pos, InstructionKind.TABLE_LOAD, tempVarRef, keyRegIndex, varRefRegIndex)); this.env.targetOperand = tempVarRef; this.varAssignment = variableStore; } @Override public void visit(BLangStructFieldAccessExpr astStructFieldAccessExpr) { generateMappingAccess(astStructFieldAccessExpr, astStructFieldAccessExpr.optionalFieldAccess); } @Override public void visit(BLangJSONAccessExpr astJSONFieldAccessExpr) { if (astJSONFieldAccessExpr.indexExpr.getBType().tag == TypeTags.INT) { generateArrayAccess(astJSONFieldAccessExpr); return; } 
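// Only integer indexes take the array-access path above; string keys on JSON fall through to mapping-access semantics below.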
generateMappingAccess(astJSONFieldAccessExpr, astJSONFieldAccessExpr.optionalFieldAccess); } @Override public void visit(BLangDynamicArgExpr dynamicParamExpr) { dynamicParamExpr.condition.accept(this); dynamicParamExpr.conditionalArgument.accept(this); } @Override public void visit(BLangStringAccessExpr stringAccessExpr) { BIRVariableDcl tempVarDcl = new BIRVariableDcl(stringAccessExpr.getBType(), this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand tempVarRef = new BIROperand(tempVarDcl); stringAccessExpr.expr.accept(this); BIROperand varRefRegIndex = this.env.targetOperand; stringAccessExpr.indexExpr.accept(this); BIROperand keyRegIndex = this.env.targetOperand; setScopeAndEmit(new BIRNonTerminator.FieldAccess(stringAccessExpr.pos, InstructionKind.STRING_LOAD, tempVarRef, keyRegIndex, varRefRegIndex)); this.env.targetOperand = tempVarRef; } @Override public void visit(BLangArrayAccessExpr astArrayAccessExpr) { generateArrayAccess(astArrayAccessExpr); } @Override public void visit(BLangIndexBasedAccess.BLangTupleAccessExpr tupleAccessExpr) { generateArrayAccess(tupleAccessExpr); } @Override public void visit(BLangIsLikeExpr isLikeExpr) { BIRVariableDcl tempVarDcl = new BIRVariableDcl(symTable.booleanType, this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand toVarRef = new BIROperand(tempVarDcl); isLikeExpr.expr.accept(this); BIROperand exprIndex = this.env.targetOperand; setScopeAndEmit(new BIRNonTerminator.IsLike(isLikeExpr.pos, isLikeExpr.typeNode.getBType(), toVarRef, exprIndex)); this.env.targetOperand = toVarRef; } @Override public void visit(BLangTypeTestExpr typeTestExpr) { BIRVariableDcl tempVarDcl = new BIRVariableDcl(symTable.booleanType, this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand toVarRef = new BIROperand(tempVarDcl); typeTestExpr.expr.accept(this); BIROperand exprIndex = this.env.targetOperand; setScopeAndEmit( new BIRNonTerminator.TypeTest(typeTestExpr.pos, typeTestExpr.typeNode.getBType(), toVarRef, exprIndex)); this.env.targetOperand = toVarRef; } @Override public void visit(BLangLocalVarRef astVarRefExpr) { boolean variableStore = this.varAssignment; this.varAssignment = false; BSymbol varSymbol = astVarRefExpr.symbol; if (variableStore) { if (astVarRefExpr.symbol.name != Names.IGNORE) { BIROperand varRef = new BIROperand(this.env.symbolVarMap.get(varSymbol)); setScopeAndEmit(new Move(astVarRefExpr.pos, this.env.targetOperand, varRef)); } } else { BIRVariableDcl tempVarDcl = new BIRVariableDcl(varSymbol.type, this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand tempVarRef = new BIROperand(tempVarDcl); BIRVariableDcl varDecl = this.env.symbolVarMap.get(varSymbol); BIROperand fromVarRef = new BIROperand(varDecl); setScopeAndEmit(new Move(astVarRefExpr.pos, fromVarRef, tempVarRef)); this.env.targetOperand = tempVarRef; } this.varAssignment = variableStore; } @Override public void visit(BLangPackageVarRef astPackageVarRefExpr) { boolean variableStore = this.varAssignment; this.varAssignment = false; if (variableStore) { if (astPackageVarRefExpr.symbol.name != Names.IGNORE) { BIROperand varRef = new BIROperand(getVarRef(astPackageVarRefExpr)); setScopeAndEmit(new Move(astPackageVarRefExpr.pos, this.env.targetOperand, varRef)); } } else { BIRVariableDcl tempVarDcl = new
BIRVariableDcl(astPackageVarRefExpr.getBType(), this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand tempVarRef = new BIROperand(tempVarDcl); BIROperand fromVarRef = new BIROperand(getVarRef(astPackageVarRefExpr)); setScopeAndEmit(new Move(astPackageVarRefExpr.pos, fromVarRef, tempVarRef)); this.env.targetOperand = tempVarRef; } this.varAssignment = variableStore; } private BIRGlobalVariableDcl getVarRef(BLangPackageVarRef astPackageVarRefExpr) { BSymbol symbol = astPackageVarRefExpr.symbol; if ((symbol.tag & SymTag.CONSTANT) == SymTag.CONSTANT || !isInSamePackage(astPackageVarRefExpr.varSymbol, env.enclPkg.packageID)) { return new BIRGlobalVariableDcl(astPackageVarRefExpr.pos, symbol.flags, symbol.type, symbol.pkgID, symbol.name, symbol.getOriginalName(), VarScope.GLOBAL, VarKind.CONSTANT, symbol.name.value, symbol.origin.toBIROrigin()); } return this.globalVarMap.get(symbol); } @Override public void visit(BLangBinaryExpr astBinaryExpr) { astBinaryExpr.lhsExpr.accept(this); BIROperand rhsOp1 = this.env.targetOperand; astBinaryExpr.rhsExpr.accept(this); BIROperand rhsOp2 = this.env.targetOperand; BIRVariableDcl tempVarDcl = new BIRVariableDcl(astBinaryExpr.getBType(), this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand lhsOp = new BIROperand(tempVarDcl); this.env.targetOperand = lhsOp; BinaryOp binaryIns = new BinaryOp(astBinaryExpr.pos, getBinaryInstructionKind(astBinaryExpr.opKind), astBinaryExpr.getBType(), lhsOp, rhsOp1, rhsOp2); setScopeAndEmit(binaryIns); } @Override public void visit(BLangUnaryExpr unaryExpr) { unaryExpr.expr.accept(this); BIROperand rhsOp = this.env.targetOperand; BIRVariableDcl tempVarDcl = new BIRVariableDcl(unaryExpr.getBType(), this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand lhsOp = new BIROperand(tempVarDcl); if (OperatorKind.ADD.equals(unaryExpr.operator) || OperatorKind.UNTAINT.equals(unaryExpr.operator)) { setScopeAndEmit(new Move(unaryExpr.pos, rhsOp, lhsOp)); this.env.targetOperand = lhsOp; return; } UnaryOP unaryIns = new UnaryOP(unaryExpr.pos, getUnaryInstructionKind(unaryExpr.operator), lhsOp, rhsOp); setScopeAndEmit(unaryIns); this.env.targetOperand = lhsOp; } @Override public void visit(BLangTrapExpr trapExpr) { BIRBasicBlock trapBB = new BIRBasicBlock(this.env.nextBBId(names)); this.env.enclBasicBlocks.add(trapBB); this.env.enclBB.terminator = new BIRTerminator.GOTO(trapExpr.pos, trapBB, this.currentScope); this.env.enclBB = trapBB; this.env.trapBlocks.push(new ArrayList<>()); addToTrapStack(trapBB); trapExpr.expr.accept(this); List<BIRBasicBlock> trappedBlocks = this.env.trapBlocks.pop(); BIRBasicBlock nextBB = new BIRBasicBlock(this.env.nextBBId(names)); addToTrapStack(nextBB); env.enclBasicBlocks.add(nextBB); if (this.env.enclBB.terminator == null) { this.env.enclBB.terminator = new BIRTerminator.GOTO(trapExpr.pos, nextBB, this.currentScope); } env.enclFunc.errorTable.add(new BIRNode.BIRErrorEntry(trappedBlocks.get(0), trappedBlocks.get(trappedBlocks.size() - 1), env.targetOperand, nextBB)); this.env.enclBB = nextBB; } @Override public void visit(BLangWaitExpr waitExpr) { createWait(waitExpr); } @Override public void visit(BLangWaitForAllExpr.BLangWaitLiteral waitLiteral) { visitTypedesc(waitLiteral.pos, waitLiteral.getBType(), Collections.emptyList()); BIRBasicBlock thenBB = new BIRBasicBlock(this.env.nextBBId(names)); 
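// thenBB is the continuation once every awaited key completes; the WaitAll terminator emitted below jumps to it.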
addToTrapStack(thenBB); BIRVariableDcl tempVarDcl = new BIRVariableDcl(waitLiteral.getBType(), this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand toVarRef = new BIROperand(tempVarDcl); setScopeAndEmit(new BIRNonTerminator.NewStructure(waitLiteral.pos, toVarRef, this.env.targetOperand)); this.env.targetOperand = toVarRef; List<String> keys = new ArrayList<>(); List<BIROperand> valueExprs = new ArrayList<>(); for (BLangWaitForAllExpr.BLangWaitKeyValue keyValue : waitLiteral.keyValuePairs) { keys.add(keyValue.key.value); BLangExpression expr = keyValue.valueExpr != null ? keyValue.valueExpr : keyValue.keyExpr; expr.accept(this); BIROperand valueRegIndex = this.env.targetOperand; valueExprs.add(valueRegIndex); } this.env.enclBB.terminator = new BIRTerminator.WaitAll(waitLiteral.pos, toVarRef, keys, valueExprs, thenBB, this.currentScope); this.env.targetOperand = toVarRef; this.env.enclFunc.basicBlocks.add(thenBB); this.env.enclBB = thenBB; } @Override public void visit(BLangIsAssignableExpr assignableExpr) { BIRVariableDcl tempVarDcl = new BIRVariableDcl(symTable.booleanType, this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand toVarRef = new BIROperand(tempVarDcl); assignableExpr.lhsExpr.accept(this); BIROperand exprIndex = this.env.targetOperand; setScopeAndEmit( new BIRNonTerminator.TypeTest(assignableExpr.pos, assignableExpr.targetType, toVarRef, exprIndex)); this.env.targetOperand = toVarRef; } @Override public void visit(BLangXMLQName xmlQName) { BIRVariableDcl tempVarDcl = new BIRVariableDcl(symTable.anyType, this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand toVarRef = new BIROperand(tempVarDcl); if (!xmlQName.isUsedInXML) { String qName = xmlQName.namespaceURI == null ? 
xmlQName.localname.value : ("{" + xmlQName.namespaceURI + "}" + xmlQName.localname); generateStringLiteral(qName); return; } BIROperand nsURIIndex = generateStringLiteral(xmlQName.namespaceURI); BIROperand localnameIndex = generateStringLiteral(xmlQName.localname.value); BIROperand prefixIndex = generateStringLiteral(xmlQName.prefix.value); BIRNonTerminator.NewXMLQName newXMLQName = new BIRNonTerminator.NewXMLQName(xmlQName.pos, toVarRef, localnameIndex, nsURIIndex, prefixIndex); setScopeAndEmit(newXMLQName); this.env.targetOperand = toVarRef; } @Override public void visit(BLangXMLElementLiteral xmlElementLiteral) { BIRVariableDcl tempVarDcl = new BIRVariableDcl(xmlElementLiteral.getBType(), this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand toVarRef = new BIROperand(tempVarDcl); xmlElementLiteral.inlineNamespaces.forEach(xmlns -> xmlns.accept(this)); BLangExpression startTagName = (BLangExpression) xmlElementLiteral.getStartTagName(); startTagName.accept(this); BIROperand startTagNameIndex = this.env.targetOperand; BIROperand defaultNsURIVarRef = generateNamespaceRef(xmlElementLiteral.defaultNsSymbol, xmlElementLiteral.pos); BIRNonTerminator.NewXMLElement newXMLElement = new BIRNonTerminator.NewXMLElement(xmlElementLiteral.pos, toVarRef, startTagNameIndex, defaultNsURIVarRef, Symbols.isFlagOn(xmlElementLiteral.getBType().flags, Flags.READONLY)); setScopeAndEmit(newXMLElement); populateXML(xmlElementLiteral, toVarRef); this.env.targetOperand = toVarRef; } @Override public void visit(BLangXMLAttribute attribute) { BIROperand xmlVarRef = this.env.targetOperand; attribute.name.accept(this); BIROperand attrNameOp = this.env.targetOperand; attribute.value.accept(this); BIROperand attrValueOp = this.env.targetOperand; setScopeAndEmit(new BIRNonTerminator.FieldAccess(attribute.pos, InstructionKind.XML_ATTRIBUTE_STORE, xmlVarRef, attrNameOp, attrValueOp)); } @Override public void visit(BLangXMLSequenceLiteral xmlSequenceLiteral) { BIRVariableDcl tempVarDcl = new BIRVariableDcl(xmlSequenceLiteral.getBType(), this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand toVarRef = new BIROperand(tempVarDcl); BIRNonTerminator.NewXMLSequence newXMLSequence = new BIRNonTerminator.NewXMLSequence(xmlSequenceLiteral.pos, toVarRef); setScopeAndEmit(newXMLSequence); populateXMLSequence(xmlSequenceLiteral, toVarRef); this.env.targetOperand = toVarRef; } @Override public void visit(BLangXMLTextLiteral xmlTextLiteral) { BIRVariableDcl tempVarDcl = new BIRVariableDcl(xmlTextLiteral.getBType(), this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand toVarRef = new BIROperand(tempVarDcl); xmlTextLiteral.concatExpr.accept(this); BIROperand xmlTextIndex = this.env.targetOperand; BIRNonTerminator.NewXMLText newXMLElement = new BIRNonTerminator.NewXMLText(xmlTextLiteral.pos, toVarRef, xmlTextIndex); setScopeAndEmit(newXMLElement); this.env.targetOperand = toVarRef; } @Override public void visit(BLangXMLCommentLiteral xmlCommentLiteral) { BIRVariableDcl tempVarDcl = new BIRVariableDcl(xmlCommentLiteral.getBType(), this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand toVarRef = new BIROperand(tempVarDcl); xmlCommentLiteral.concatExpr.accept(this); BIROperand xmlCommentIndex = this.env.targetOperand; BIRNonTerminator.NewXMLComment newXMLComment = new 
BIRNonTerminator.NewXMLComment(xmlCommentLiteral.pos, toVarRef, xmlCommentIndex, Symbols.isFlagOn(xmlCommentLiteral.getBType().flags, Flags.READONLY)); setScopeAndEmit(newXMLComment); this.env.targetOperand = toVarRef; } @Override public void visit(BLangXMLProcInsLiteral xmlProcInsLiteral) { BIRVariableDcl tempVarDcl = new BIRVariableDcl(xmlProcInsLiteral.getBType(), this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand toVarRef = new BIROperand(tempVarDcl); xmlProcInsLiteral.dataConcatExpr.accept(this); BIROperand dataIndex = this.env.targetOperand; xmlProcInsLiteral.target.accept(this); BIROperand targetIndex = this.env.targetOperand; BIRNonTerminator.NewXMLProcIns newXMLProcIns = new BIRNonTerminator.NewXMLProcIns(xmlProcInsLiteral.pos, toVarRef, dataIndex, targetIndex, Symbols.isFlagOn(xmlProcInsLiteral.getBType().flags, Flags.READONLY)); setScopeAndEmit(newXMLProcIns); this.env.targetOperand = toVarRef; } @Override public void visit(BLangXMLQuotedString xmlQuotedString) { xmlQuotedString.concatExpr.accept(this); } @Override public void visit(BLangXMLNSStatement xmlnsStmtNode) { xmlnsStmtNode.xmlnsDecl.accept(this); } @Override public void visit(BLangXMLNS xmlnsNode) { } @Override public void visit(BLangLocalXMLNS xmlnsNode) { generateXMLNamespace(xmlnsNode); } @Override public void visit(BLangPackageXMLNS xmlnsNode) { generateXMLNamespace(xmlnsNode); } @Override public void visit(BLangXMLAccessExpr xmlAccessExpr) { generateMappingAccess(xmlAccessExpr, false); } @Override public void visit(BLangTypedescExpr accessExpr) { BIRVariableDcl tempVarDcl = new BIRVariableDcl(accessExpr.getBType(), this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand toVarRef = new BIROperand(tempVarDcl); setScopeAndEmit(new BIRNonTerminator.NewTypeDesc(accessExpr.pos, toVarRef, accessExpr.resolvedType, Collections.emptyList())); this.env.targetOperand = toVarRef; } @Override public void visit(BLangTableConstructorExpr tableConstructorExpr) { BIRVariableDcl tempVarDcl = new BIRVariableDcl(tableConstructorExpr.getBType(), this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand toVarRef = new BIROperand(tempVarDcl); BLangArrayLiteral keySpecifierLiteral = new BLangArrayLiteral(); keySpecifierLiteral.pos = tableConstructorExpr.pos; keySpecifierLiteral.setBType(symTable.stringArrayType); keySpecifierLiteral.exprs = new ArrayList<>(); BTableType type = (BTableType) tableConstructorExpr.getBType(); if (type.fieldNameList != null) { type.fieldNameList.forEach(col -> { BLangLiteral colLiteral = new BLangLiteral(); colLiteral.pos = tableConstructorExpr.pos; colLiteral.setBType(symTable.stringType); colLiteral.value = col; keySpecifierLiteral.exprs.add(colLiteral); }); } keySpecifierLiteral.accept(this); BIROperand keyColOp = this.env.targetOperand; BLangArrayLiteral dataLiteral = new BLangArrayLiteral(); dataLiteral.pos = tableConstructorExpr.pos; dataLiteral.setBType(new BArrayType(((BTableType) tableConstructorExpr.getBType()).constraint)); dataLiteral.exprs = new ArrayList<>(tableConstructorExpr.recordLiteralList); dataLiteral.accept(this); BIROperand dataOp = this.env.targetOperand; setScopeAndEmit( new BIRNonTerminator.NewTable(tableConstructorExpr.pos, tableConstructorExpr.getBType(), toVarRef, keyColOp, dataOp)); this.env.targetOperand = toVarRef; } @Override public void visit(BLangSimpleVarRef.BLangTypeLoad 
typeLoad) { visitTypedesc(typeLoad.pos, typeLoad.symbol.type, Collections.emptyList()); } private void visitTypedesc(Location pos, BType type, List<BIROperand> varDcls) { BIRVariableDcl tempVarDcl = new BIRVariableDcl(symTable.typeDesc, this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind .TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand toVarRef = new BIROperand(tempVarDcl); setScopeAndEmit(new BIRNonTerminator.NewTypeDesc(pos, toVarRef, type, varDcls)); this.env.targetOperand = toVarRef; } @Override public void visit(BLangBreak breakStmt) { BIRLockDetailsHolder toUnlock = this.env.unlockVars.peek(); if (!toUnlock.isEmpty()) { BIRBasicBlock goToBB = new BIRBasicBlock(this.env.nextBBId(names)); this.env.enclBasicBlocks.add(goToBB); this.env.enclBB.terminator = new BIRTerminator.GOTO(breakStmt.pos, goToBB, this.currentScope); this.env.enclBB = goToBB; } int numLocks = toUnlock.size(); while (numLocks > 0) { BIRBasicBlock unlockBB = new BIRBasicBlock(this.env.nextBBId(names)); this.env.enclBasicBlocks.add(unlockBB); BIRTerminator.Unlock unlock = new BIRTerminator.Unlock(null, unlockBB, this.currentScope); this.env.enclBB.terminator = unlock; unlock.relatedLock = toUnlock.getLock(numLocks - 1); this.env.enclBB = unlockBB; numLocks--; } this.env.enclBB.terminator = new BIRTerminator.GOTO(breakStmt.pos, this.env.enclLoopEndBB, this.currentScope); } @Override public void visit(BLangContinue continueStmt) { BIRLockDetailsHolder toUnlock = this.env.unlockVars.peek(); if (!toUnlock.isEmpty()) { BIRBasicBlock goToBB = new BIRBasicBlock(this.env.nextBBId(names)); this.env.enclBasicBlocks.add(goToBB); this.env.enclBB.terminator = new BIRTerminator.GOTO(continueStmt.pos, goToBB, this.currentScope); this.env.enclBB = goToBB; } int numLocks = toUnlock.size(); while (numLocks > 0) { BIRBasicBlock unlockBB = new BIRBasicBlock(this.env.nextBBId(names)); this.env.enclBasicBlocks.add(unlockBB); BIRTerminator.Unlock unlock = new BIRTerminator.Unlock(null, unlockBB, this.currentScope); this.env.enclBB.terminator = unlock; BIRTerminator.Lock lock = toUnlock.getLock(numLocks - 1); unlock.relatedLock = lock; this.env.enclBB = unlockBB; numLocks--; } this.env.enclBB.terminator = new BIRTerminator.GOTO(continueStmt.pos, this.env.enclLoopBB, this.currentScope); } @Override public void visit(BLangFunctionVarRef fpVarRef) { generateFPVarRef(fpVarRef, (BInvokableSymbol) fpVarRef.symbol); } @Override public void visit(BLangStructFunctionVarRef structFpVarRef) { generateFPVarRef(structFpVarRef, (BInvokableSymbol) structFpVarRef.symbol); } @Override public void visit(BLangLockStmt lockStmt) { BIRBasicBlock lockedBB = new BIRBasicBlock(this.env.nextBBId(names)); addToTrapStack(lockedBB); this.env.enclBasicBlocks.add(lockedBB); BIRTerminator.Lock lock = new BIRTerminator.Lock(lockStmt.pos, lockedBB, this.currentScope); this.env.enclBB.terminator = lock; lockStmtMap.put(lockStmt, lock); this.env.unlockVars.peek().addLock(lock); populateBirLockWithGlobalVars(lockStmt); this.env.enclBB = lockedBB; } private void populateBirLockWithGlobalVars(BLangLockStmt lockStmt) { for (BVarSymbol globalVar : lockStmt.lockVariables) { BIRGlobalVariableDcl birGlobalVar = this.globalVarMap.get(globalVar); if (birGlobalVar == null) { birGlobalVar = dummyGlobalVarMapForLocks.computeIfAbsent(globalVar, k -> new BIRGlobalVariableDcl(null, globalVar.flags, globalVar.type, globalVar.pkgID, globalVar.name, globalVar.getOriginalName(), VarScope.GLOBAL, VarKind.GLOBAL, globalVar.name.value, globalVar.origin.toBIROrigin())); } 
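// Globals owned by other modules have no local BIR declaration, so a cached dummy declaration stands in as the lock target.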
((BIRTerminator.Lock) this.env.enclBB.terminator).lockVariables.add(birGlobalVar); } } @Override public void visit(BLangUnLockStmt unLockStmt) { BIRLockDetailsHolder lockDetailsHolder = this.env.unlockVars.peek(); if (lockDetailsHolder.isEmpty()) { return; } BIRBasicBlock unLockedBB = new BIRBasicBlock(this.env.nextBBId(names)); addToTrapStack(unLockedBB); this.env.enclBasicBlocks.add(unLockedBB); this.env.enclBB.terminator = new BIRTerminator.Unlock(unLockStmt.pos, unLockedBB, this.currentScope); ((BIRTerminator.Unlock) this.env.enclBB.terminator).relatedLock = lockStmtMap.get(unLockStmt.relatedLock); this.env.enclBB = unLockedBB; lockDetailsHolder.removeLastLock(); } private void setScopeAndEmit(BIRNonTerminator instruction) { instruction.scope = this.currentScope; this.env.enclBB.instructions.add(instruction); } private InstructionKind getBinaryInstructionKind(OperatorKind opKind) { switch (opKind) { case ADD: return InstructionKind.ADD; case SUB: return InstructionKind.SUB; case MUL: return InstructionKind.MUL; case DIV: return InstructionKind.DIV; case MOD: return InstructionKind.MOD; case EQUAL: case EQUALS: return InstructionKind.EQUAL; case NOT_EQUAL: return InstructionKind.NOT_EQUAL; case GREATER_THAN: return InstructionKind.GREATER_THAN; case GREATER_EQUAL: return InstructionKind.GREATER_EQUAL; case LESS_THAN: return InstructionKind.LESS_THAN; case LESS_EQUAL: return InstructionKind.LESS_EQUAL; case AND: return InstructionKind.AND; case OR: return InstructionKind.OR; case REF_EQUAL: return InstructionKind.REF_EQUAL; case REF_NOT_EQUAL: return InstructionKind.REF_NOT_EQUAL; case CLOSED_RANGE: return InstructionKind.CLOSED_RANGE; case HALF_OPEN_RANGE: return InstructionKind.HALF_OPEN_RANGE; case ANNOT_ACCESS: return InstructionKind.ANNOT_ACCESS; case BITWISE_AND: return InstructionKind.BITWISE_AND; case BITWISE_OR: return InstructionKind.BITWISE_OR; case BITWISE_XOR: return InstructionKind.BITWISE_XOR; case BITWISE_LEFT_SHIFT: return InstructionKind.BITWISE_LEFT_SHIFT; case BITWISE_RIGHT_SHIFT: return InstructionKind.BITWISE_RIGHT_SHIFT; case BITWISE_UNSIGNED_RIGHT_SHIFT: return InstructionKind.BITWISE_UNSIGNED_RIGHT_SHIFT; default: throw new IllegalStateException("unsupported binary operation: " + opKind.value()); } } private InstructionKind getUnaryInstructionKind(OperatorKind opKind) { switch (opKind) { case TYPEOF: return InstructionKind.TYPEOF; case NOT: return InstructionKind.NOT; case SUB: return InstructionKind.NEGATE; case ADD: return InstructionKind.MOVE; default: throw new IllegalStateException("unsupported unary operator: " + opKind.value()); } } private void generateListConstructorExpr(BLangListConstructorExpr listConstructorExpr) { BIRVariableDcl tempVarDcl = new BIRVariableDcl(listConstructorExpr.getBType(), this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand toVarRef = new BIROperand(tempVarDcl); long size = -1L; List<BLangExpression> exprs = listConstructorExpr.exprs; BType listConstructorExprType = types.getReferredType(listConstructorExpr.getBType()); if (listConstructorExprType.tag == TypeTags.ARRAY && ((BArrayType) listConstructorExprType).state != BArrayState.OPEN) { size = ((BArrayType) listConstructorExprType).size; } else if (listConstructorExprType.tag == TypeTags.TUPLE) { size = exprs.size(); } BLangLiteral literal = new BLangLiteral(); literal.pos = listConstructorExpr.pos; literal.value = size; literal.setBType(symTable.intType); literal.accept(this); BIROperand sizeOp = 
this.env.targetOperand; List<BIROperand> valueOperands = new ArrayList<>(exprs.size()); for (BLangExpression expr : exprs) { expr.accept(this); valueOperands.add(this.env.targetOperand); } setScopeAndEmit( new BIRNonTerminator.NewArray(listConstructorExpr.pos, listConstructorExprType, toVarRef, sizeOp, valueOperands)); this.env.targetOperand = toVarRef; } private void generateArrayAccess(BLangIndexBasedAccess astArrayAccessExpr) { boolean variableStore = this.varAssignment; this.varAssignment = false; BIROperand rhsOp = this.env.targetOperand; astArrayAccessExpr.expr.accept(this); BIROperand varRefRegIndex = this.env.targetOperand; astArrayAccessExpr.indexExpr.accept(this); BIROperand keyRegIndex = this.env.targetOperand; if (variableStore) { setScopeAndEmit(new BIRNonTerminator.FieldAccess(astArrayAccessExpr.pos, InstructionKind.ARRAY_STORE, varRefRegIndex, keyRegIndex, rhsOp)); return; } BIRVariableDcl tempVarDcl = new BIRVariableDcl(astArrayAccessExpr.getBType(), this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand tempVarRef = new BIROperand(tempVarDcl); setScopeAndEmit(new BIRNonTerminator.FieldAccess(astArrayAccessExpr.pos, InstructionKind.ARRAY_LOAD, tempVarRef, keyRegIndex, varRefRegIndex, false, astArrayAccessExpr.isLValue && !astArrayAccessExpr.leafNode)); this.env.targetOperand = tempVarRef; this.varAssignment = variableStore; } private void generateMappingAccess(BLangIndexBasedAccess astIndexBasedAccessExpr, boolean except) { boolean variableStore = this.varAssignment; this.varAssignment = false; InstructionKind insKind; BType astAccessExprExprType = types.getReferredType(astIndexBasedAccessExpr.expr.getBType()); if (variableStore) { BIROperand rhsOp = this.env.targetOperand; astIndexBasedAccessExpr.expr.accept(this); BIROperand varRefRegIndex = this.env.targetOperand; astIndexBasedAccessExpr.indexExpr.accept(this); BIROperand keyRegIndex = this.env.targetOperand; if (astIndexBasedAccessExpr.getKind() == NodeKind.XML_ATTRIBUTE_ACCESS_EXPR) { insKind = InstructionKind.XML_ATTRIBUTE_STORE; keyRegIndex = getQNameOP(astIndexBasedAccessExpr.indexExpr, keyRegIndex); } else if (astAccessExprExprType.tag == TypeTags.OBJECT || (astAccessExprExprType.tag == TypeTags.UNION && ((BUnionType) astAccessExprExprType).getMemberTypes().iterator() .next().tag == TypeTags.OBJECT)) { insKind = InstructionKind.OBJECT_STORE; } else { insKind = InstructionKind.MAP_STORE; } setScopeAndEmit( new BIRNonTerminator.FieldAccess(astIndexBasedAccessExpr.pos, insKind, varRefRegIndex, keyRegIndex, rhsOp, astIndexBasedAccessExpr.isStoreOnCreation)); } else { BIRVariableDcl tempVarDcl = new BIRVariableDcl(astIndexBasedAccessExpr.getBType(), this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand tempVarRef = new BIROperand(tempVarDcl); astIndexBasedAccessExpr.expr.accept(this); BIROperand varRefRegIndex = this.env.targetOperand; astIndexBasedAccessExpr.indexExpr.accept(this); BIROperand keyRegIndex = this.env.targetOperand; if (astIndexBasedAccessExpr.getKind() == NodeKind.XML_ATTRIBUTE_ACCESS_EXPR) { insKind = InstructionKind.XML_ATTRIBUTE_LOAD; keyRegIndex = getQNameOP(astIndexBasedAccessExpr.indexExpr, keyRegIndex); } else if (TypeTags.isXMLTypeTag(astAccessExprExprType.tag)) { generateXMLAccess((BLangXMLAccessExpr) astIndexBasedAccessExpr, tempVarRef, varRefRegIndex, keyRegIndex); this.varAssignment = variableStore; return; } else if (astAccessExprExprType.tag == 
TypeTags.OBJECT || (astAccessExprExprType.tag == TypeTags.UNION && ((BUnionType) astAccessExprExprType).getMemberTypes().iterator() .next().tag == TypeTags.OBJECT)) { insKind = InstructionKind.OBJECT_LOAD; } else { insKind = InstructionKind.MAP_LOAD; } setScopeAndEmit( new BIRNonTerminator.FieldAccess(astIndexBasedAccessExpr.pos, insKind, tempVarRef, keyRegIndex, varRefRegIndex, except, astIndexBasedAccessExpr.isLValue && !astIndexBasedAccessExpr.leafNode)); this.env.targetOperand = tempVarRef; } this.varAssignment = variableStore; } private BTypeSymbol getObjectTypeSymbol(BType objType) { BType type = types.getReferredType(objType); if (type.tag == TypeTags.UNION) { return ((BUnionType) type).getMemberTypes().stream() .filter(t -> t.tag == TypeTags.OBJECT) .findFirst() .orElse(symTable.noType).tsymbol; } return type.tsymbol; } private BIROperand generateStringLiteral(String value) { BLangLiteral prefixLiteral = (BLangLiteral) TreeBuilder.createLiteralExpression(); prefixLiteral.value = value; if (value == null) { prefixLiteral.setBType(symTable.nilType); } else { prefixLiteral.setBType(symTable.stringType); } prefixLiteral.accept(this); return this.env.targetOperand; } private void generateXMLNamespace(BLangXMLNS xmlnsNode) { BIRVariableDcl birVarDcl = new BIRVariableDcl(xmlnsNode.pos, symTable.stringType, this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.LOCAL, null); this.env.enclFunc.localVars.add(birVarDcl); this.env.symbolVarMap.put(xmlnsNode.symbol, birVarDcl); xmlnsNode.namespaceURI.accept(this); BIROperand varRef = new BIROperand(birVarDcl); setScopeAndEmit(new Move(xmlnsNode.pos, this.env.targetOperand, varRef)); } private BIROperand generateNamespaceRef(BXMLNSSymbol nsSymbol, Location pos) { if (nsSymbol == null) { return generateStringLiteral(null); } int ownerTag = nsSymbol.owner.tag; if ((ownerTag & SymTag.PACKAGE) == SymTag.PACKAGE || (ownerTag & SymTag.OBJECT) == SymTag.OBJECT || (ownerTag & SymTag.RECORD) == SymTag.RECORD) { return generateStringLiteral(nsSymbol.namespaceURI); } BIRVariableDcl nsURIVarDcl = new BIRVariableDcl(symTable.stringType, this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(nsURIVarDcl); BIROperand nsURIVarRef = new BIROperand(nsURIVarDcl); BIRVariableDcl varDecl = this.env.symbolVarMap.get(nsSymbol); BIROperand fromVarRef = new BIROperand(varDecl); setScopeAndEmit(new Move(pos, fromVarRef, nsURIVarRef)); return nsURIVarRef; } private void populateXMLSequence(BLangXMLSequenceLiteral xmlSequenceLiteral, BIROperand toVarRef) { for (BLangExpression xmlItem : xmlSequenceLiteral.xmlItems) { xmlItem.accept(this); BIROperand childOp = this.env.targetOperand; setScopeAndEmit( new BIRNonTerminator.XMLAccess(xmlItem.pos, InstructionKind.XML_SEQ_STORE, toVarRef, childOp)); } } private void populateXML(BLangXMLElementLiteral xmlElementLiteral, BIROperand toVarRef) { xmlElementLiteral.namespacesInScope.forEach((name, symbol) -> { BLangXMLQName nsQName = new BLangXMLQName(name.getValue(), XMLConstants.XMLNS_ATTRIBUTE); nsQName.setBType(symTable.stringType); nsQName.accept(this); BIROperand nsQNameIndex = this.env.targetOperand; BIROperand nsURIIndex = generateNamespaceRef(symbol, xmlElementLiteral.pos); setScopeAndEmit(new BIRNonTerminator.FieldAccess(xmlElementLiteral.pos, InstructionKind.XML_ATTRIBUTE_STORE, toVarRef, nsQNameIndex, nsURIIndex)); }); xmlElementLiteral.attributes.forEach(attribute -> { this.env.targetOperand = toVarRef; attribute.accept(this); }); 
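// Finally, append the element's children to the sequence via XML_SEQ_STORE.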
xmlElementLiteral.modifiedChildren.forEach(child -> { child.accept(this); BIROperand childOp = this.env.targetOperand; setScopeAndEmit( new BIRNonTerminator.XMLAccess(child.pos, InstructionKind.XML_SEQ_STORE, toVarRef, childOp)); }); } private BIROperand getQNameOP(BLangExpression qnameExpr, BIROperand keyRegIndex) { if (qnameExpr.getKind() == NodeKind.XML_QNAME) { return keyRegIndex; } BIRVariableDcl tempQNameVarDcl = new BIRVariableDcl(symTable.anyType, this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempQNameVarDcl); BIROperand qnameVarRef = new BIROperand(tempQNameVarDcl); setScopeAndEmit(new BIRNonTerminator.NewStringXMLQName(qnameExpr.pos, qnameVarRef, keyRegIndex)); return qnameVarRef; } private void generateXMLAccess(BLangXMLAccessExpr xmlAccessExpr, BIROperand tempVarRef, BIROperand varRefRegIndex, BIROperand keyRegIndex) { this.env.targetOperand = tempVarRef; InstructionKind insKind; if (xmlAccessExpr.fieldType == FieldKind.ALL) { setScopeAndEmit(new BIRNonTerminator.XMLAccess(xmlAccessExpr.pos, InstructionKind.XML_LOAD_ALL, tempVarRef, varRefRegIndex)); return; } else if (xmlAccessExpr.indexExpr.getBType().tag == TypeTags.STRING) { insKind = InstructionKind.XML_LOAD; } else { insKind = InstructionKind.XML_SEQ_LOAD; } setScopeAndEmit( new BIRNonTerminator.FieldAccess(xmlAccessExpr.pos, insKind, tempVarRef, keyRegIndex, varRefRegIndex)); } private void generateFPVarRef(BLangExpression fpVarRef, BInvokableSymbol funcSymbol) { BIRVariableDcl tempVarLambda = new BIRVariableDcl(fpVarRef.getBType(), this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarLambda); BIROperand lhsOp = new BIROperand(tempVarLambda); Name funcName = getFuncName(funcSymbol); List<BIRVariableDcl> params = new ArrayList<>(); funcSymbol.params.forEach(param -> { BIRVariableDcl birVarDcl = new BIRVariableDcl(fpVarRef.pos, param.type, this.env.nextLambdaVarId(names), VarScope.FUNCTION, VarKind.ARG, null); params.add(birVarDcl); }); BVarSymbol restParam = funcSymbol.restParam; if (restParam != null) { BIRVariableDcl birVarDcl = new BIRVariableDcl(fpVarRef.pos, restParam.type, this.env.nextLambdaVarId(names), VarScope.FUNCTION, VarKind.ARG, null); params.add(birVarDcl); } setScopeAndEmit( new BIRNonTerminator.FPLoad(fpVarRef.pos, funcSymbol.pkgID, funcName, lhsOp, params, new ArrayList<>(), funcSymbol.type, funcSymbol.strandName, funcSymbol.schedulerPolicy)); this.env.targetOperand = lhsOp; } private void populateBIRAnnotAttachments(List<BLangAnnotationAttachment> astAnnotAttachments, List<BIRAnnotationAttachment> birAnnotAttachments, BIRGenEnv currentEnv) { currentEnv.enclAnnotAttachments = birAnnotAttachments; astAnnotAttachments.forEach(annotAttach -> annotAttach.accept(this)); currentEnv.enclAnnotAttachments = null; } private void addToTrapStack(BIRBasicBlock birBasicBlock) { if (this.env.trapBlocks.isEmpty()) { return; } this.env.trapBlocks.peek().add(birBasicBlock); } private List<BIRAnnotationAttachment> getStatementAnnotations(List<BLangAnnotationAttachment> astAnnotAttachments, BIRGenEnv currentEnv) { List<BIRAnnotationAttachment> functionAnnotAttachments = currentEnv.enclAnnotAttachments; currentEnv.enclAnnotAttachments = new ArrayList<>(); astAnnotAttachments.forEach(annotAttach -> annotAttach.accept(this)); List<BIRAnnotationAttachment> statementAnnots = currentEnv.enclAnnotAttachments; currentEnv.enclAnnotAttachments = functionAnnotAttachments; return statementAnnots; } private 
List<BIRNode.BIRMappingConstructorEntry> generateMappingConstructorEntries( List<RecordLiteralNode.RecordField> fields) { List<BIRNode.BIRMappingConstructorEntry> initialValues = new ArrayList<>(fields.size()); for (RecordLiteralNode.RecordField field : fields) { if (field.isKeyValueField()) { BLangRecordKeyValueField keyValueField = (BLangRecordKeyValueField) field; keyValueField.key.expr.accept(this); BIROperand keyOperand = this.env.targetOperand; keyValueField.valueExpr.accept(this); BIROperand valueOperand = this.env.targetOperand; initialValues.add(new BIRNode.BIRMappingConstructorKeyValueEntry(keyOperand, valueOperand)); continue; } BLangRecordLiteral.BLangRecordSpreadOperatorField spreadField = (BLangRecordLiteral.BLangRecordSpreadOperatorField) field; spreadField.expr.accept(this); initialValues.add(new BIRNode.BIRMappingConstructorSpreadFieldEntry(this.env.targetOperand)); } return initialValues; } }
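A note on the convention the expression visitors above rely on: visiting a BLangExpression emits the instructions that compute it and leaves the resulting operand in env.targetOperand, which the caller reads back immediately. A minimal sketch of that protocol (genExpr is a hypothetical helper for illustration only, not part of BIRGen):

// Hypothetical wrapper over BIRGen's accept/targetOperand protocol, for illustration only.
private BIROperand genExpr(BLangExpression expr) {
    expr.accept(this);             // lowers the expression, appending instructions to env.enclBB
    return this.env.targetOperand; // the temp operand that now holds the expression's value
}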
class BIRGen extends BLangNodeVisitor { private static final CompilerContext.Key<BIRGen> BIR_GEN = new CompilerContext.Key<>(); public static final String DEFAULT_WORKER_NAME = "function"; public static final String CLONE_READ_ONLY = "cloneReadOnly"; private BIRGenEnv env; private Names names; private final SymbolTable symTable; private BIROptimizer birOptimizer; private final Types types; private boolean varAssignment = false; private Map<BSymbol, BIRTypeDefinition> typeDefs = new LinkedHashMap<>(); private BlockNode currentBlock; private Map<BlockNode, List<BIRVariableDcl>> varDclsByBlock = new HashMap<>(); public Map<BSymbol, BIRGlobalVariableDcl> globalVarMap = new HashMap<>(); private Map<BSymbol, BIRGlobalVariableDcl> dummyGlobalVarMapForLocks = new HashMap<>(); private Map<BLangLockStmt, BIRTerminator.Lock> lockStmtMap = new HashMap<>(); private static final String MOCK_ANNOTATION_DELIMITER = "#"; private static final String MOCK_FN_DELIMITER = "~"; private Unifier unifier; private BirScope currentScope; public static BIRGen getInstance(CompilerContext context) { BIRGen birGen = context.get(BIR_GEN); if (birGen == null) { birGen = new BIRGen(context); } return birGen; } private BIRGen(CompilerContext context) { context.put(BIR_GEN, this); this.names = Names.getInstance(context); this.symTable = SymbolTable.getInstance(context); this.birOptimizer = BIROptimizer.getInstance(context); this.unifier = new Unifier(); this.types = Types.getInstance(context); } public BLangPackage genBIR(BLangPackage astPkg) { BIRPackage birPkg = new BIRPackage(astPkg.pos, astPkg.packageID.orgName, astPkg.packageID.pkgName, astPkg.packageID.name, astPkg.packageID.version, astPkg.packageID.sourceFileName); astPkg.symbol.bir = birPkg; this.env = new BIRGenEnv(birPkg); astPkg.accept(this); this.birOptimizer.optimizePackage(birPkg); if (!astPkg.moduleContextDataHolder.skipTests() && astPkg.hasTestablePackage()) { BIRPackage testBirPkg = new BIRPackage(astPkg.pos, astPkg.packageID.orgName, astPkg.packageID.pkgName, astPkg.packageID.name, astPkg.packageID.version, astPkg.packageID.sourceFileName); this.env = new BIRGenEnv(testBirPkg); astPkg.accept(this); astPkg.getTestablePkgs().forEach(testPkg -> { visitBuiltinFunctions(testPkg, testPkg.initFunction); visitBuiltinFunctions(testPkg, testPkg.startFunction); visitBuiltinFunctions(testPkg, testPkg.stopFunction); for (BLangImportPackage mod : astPkg.imports) { testPkg.imports.remove(mod); } testPkg.accept(this); this.birOptimizer.optimizePackage(testBirPkg); testPkg.symbol.bir = testBirPkg; Map<String, String> mockFunctionMap = astPkg.getTestablePkg().getMockFunctionNamesMap(); if (!mockFunctionMap.isEmpty()) { replaceMockedFunctions(testBirPkg, mockFunctionMap, astPkg.packageID); } }); } setEntryPoints(astPkg); return astPkg; } private void setEntryPoints(BLangPackage pkgNode) { BLangFunction mainFunc = getMainFunction(pkgNode); if (mainFunc != null || listenerDeclarationFound(pkgNode.getGlobalVariables()) || !pkgNode.services.isEmpty()) { pkgNode.symbol.entryPointExists = true; } } private boolean listenerDeclarationFound(List<BLangVariable> globalVars) { for (BLangVariable globalVar : globalVars) { if (Symbols.isFlagOn(globalVar.symbol.flags, Flags.LISTENER)) { return true; } } return false; } private BLangFunction getMainFunction(BLangPackage pkgNode) { for (BLangFunction funcNode : pkgNode.functions) { if (CompilerUtils.isMainFunction(funcNode)) { return funcNode; } } return null; } private void visitBuiltinFunctions(BLangPackage pkgNode, BLangFunction function) {
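// For testable packages, the generated builtin functions are renamed (e.g. <init> -> <testinit>) so they do not clash with the originals.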
if (Symbols.isFlagOn(pkgNode.symbol.flags, Flags.TESTABLE)) { String funcName = function.getName().value; String builtinFuncName = funcName.substring(funcName.indexOf("<") + 1, funcName.indexOf(">")); String modifiedFuncName = funcName.replace(builtinFuncName, "test" + builtinFuncName); function.name.setValue(modifiedFuncName); Name functionName = names.fromString(modifiedFuncName); function.originalFuncSymbol.name = functionName; function.symbol.name = functionName; } } private void replaceMockedFunctions(BIRPackage birPkg, Map<String, String> mockFunctionMap, PackageID packageID) { replaceFunctions(birPkg.functions, mockFunctionMap, packageID); if (birPkg.typeDefs.size() != 0) { for (BIRTypeDefinition typeDef : birPkg.typeDefs) { if (typeDef.type instanceof BObjectType) { replaceFunctions(typeDef.attachedFuncs, mockFunctionMap, packageID); } } } } private void replaceFunctions(List<BIRFunction> functionList, Map<String, String> mockFunctionMap, PackageID packageID) { for (BIRFunction function : functionList) { List<BIRBasicBlock> basicBlocks = function.basicBlocks; for (BIRBasicBlock basicBlock : basicBlocks) { BIRTerminator bbTerminator = basicBlock.terminator; if (bbTerminator.kind.equals(InstructionKind.CALL)) { BIRTerminator.Call callTerminator = (BIRTerminator.Call) bbTerminator; String functionKey = callTerminator.calleePkg.toString() + MOCK_ANNOTATION_DELIMITER + callTerminator.name.toString(); String legacyKey = callTerminator.calleePkg.toString() + MOCK_FN_DELIMITER + callTerminator.name.toString(); if (mockFunctionMap.containsKey(functionKey)) { String desugarFunction = "$MOCK_" + callTerminator.name.getValue(); callTerminator.name = new Name(desugarFunction); callTerminator.calleePkg = packageID; } else if (mockFunctionMap.get(legacyKey) != null) { String mockfunctionName = mockFunctionMap.get(legacyKey); callTerminator.name = new Name(mockfunctionName); callTerminator.calleePkg = packageID; } } } } } @Override public void visit(BLangPackage astPkg) { astPkg.imports.forEach(impPkg -> impPkg.accept(this)); astPkg.constants.forEach(astConst -> astConst.accept(this)); astPkg.typeDefinitions.forEach(astTypeDef -> astTypeDef.accept(this)); generateClassDefinitions(astPkg.topLevelNodes); astPkg.globalVars.forEach(astGlobalVar -> astGlobalVar.accept(this)); astPkg.initFunction.accept(this); astPkg.startFunction.accept(this); astPkg.stopFunction.accept(this); astPkg.functions.forEach(astFunc -> astFunc.accept(this)); astPkg.annotations.forEach(astAnn -> astAnn.accept(this)); astPkg.services.forEach(service -> service.accept(this)); } private void generateClassDefinitions(List<TopLevelNode> topLevelNodes) { for (TopLevelNode topLevelNode : topLevelNodes) { if (topLevelNode.getKind() == CLASS_DEFN) { ((BLangClassDefinition) topLevelNode).accept(this); } } } private BType getDefinedType(BLangTypeDefinition astTypeDefinition) { BType nodeType = astTypeDefinition.typeNode.getBType(); if (types.getReferredType(nodeType).tag == TypeTags.ERROR) { return astTypeDefinition.symbol.type; } return nodeType; } @Override public void visit(BLangClassDefinition classDefinition) { BIRTypeDefinition typeDef = new BIRTypeDefinition(classDefinition.pos, classDefinition.symbol.name, classDefinition.symbol.originalName, classDefinition.symbol.flags, false, classDefinition.getBType(), new ArrayList<>(), classDefinition.symbol.origin.toBIROrigin()); typeDefs.put(classDefinition.symbol, typeDef); this.env.enclPkg.typeDefs.add(typeDef); typeDef.index = this.env.enclPkg.typeDefs.size() - 1;
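// Registering the class in typeDefs lets visit(BLangTypeInit) emit NewInstance against this BIRTypeDefinition for same-module classes.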
typeDef.setMarkdownDocAttachment(classDefinition.symbol.markdownDocumentation); for (BLangType typeRef : classDefinition.typeRefs) { typeDef.referencedTypes.add(typeRef.getBType()); } populateBIRAnnotAttachments(classDefinition.annAttachments, typeDef.annotAttachments, this.env); for (BAttachedFunction func : ((BObjectTypeSymbol) classDefinition.symbol).referencedFunctions) { BInvokableSymbol funcSymbol = func.symbol; BIRFunction birFunc = new BIRFunction(classDefinition.pos, func.funcName, funcSymbol.flags, func.type, names.fromString(DEFAULT_WORKER_NAME), 0, funcSymbol.origin.toBIROrigin()); if (funcSymbol.receiverSymbol != null) { birFunc.receiver = getSelf(funcSymbol.receiverSymbol); } birFunc.setMarkdownDocAttachment(funcSymbol.markdownDocumentation); int defaultableParamsCount = 0; birFunc.argsCount = funcSymbol.params.size() + defaultableParamsCount + (funcSymbol.restParam != null ? 1 : 0); funcSymbol.params.forEach(requiredParam -> addParam(birFunc, requiredParam, classDefinition.pos)); if (funcSymbol.restParam != null) { addRestParam(birFunc, funcSymbol.restParam, classDefinition.pos); } birFunc.returnVariable = new BIRVariableDcl(classDefinition.pos, funcSymbol.retType, this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.RETURN, null); birFunc.localVars.add(0, birFunc.returnVariable); typeDef.attachedFuncs.add(birFunc); } } @Override public void visit(BLangService serviceNode) { BServiceSymbol symbol = (BServiceSymbol) serviceNode.symbol; List<String> attachPoint = symbol.getAbsResourcePath().orElse(null); String attachPointLiteral = symbol.getAttachPointStringLiteral().orElse(null); BIRNode.BIRServiceDeclaration serviceDecl = new BIRNode.BIRServiceDeclaration(attachPoint, attachPointLiteral, symbol.getListenerTypes(), symbol.name, symbol.getAssociatedClassSymbol().name, symbol.type, symbol.origin, symbol.flags, symbol.pos); serviceDecl.setMarkdownDocAttachment(symbol.markdownDocumentation); this.env.enclPkg.serviceDecls.add(serviceDecl); } @Override public void visit(BLangConstant astConstant) { BConstantSymbol constantSymbol = astConstant.symbol; Name constName = constantSymbol.name; Name constOriginalName = constantSymbol.getOriginalName(); BType type = constantSymbol.type; ConstValue constantValue = getBIRConstantVal(constantSymbol.value); BIRConstant birConstant = new BIRConstant(astConstant.pos, constName, constOriginalName, constantSymbol.flags, type, constantValue, constantSymbol.origin.toBIROrigin()); birConstant.constValue = constantValue; birConstant.setMarkdownDocAttachment(astConstant.symbol.markdownDocumentation); this.env.enclPkg.constants.add(birConstant); } private ConstValue getBIRConstantVal(BLangConstantValue constValue) { if (constValue.type.tag == TypeTags.MAP) { Map<String, ConstValue> mapConstVal = new HashMap<>(); ((Map<String, BLangConstantValue>) constValue.value) .forEach((key, value) -> mapConstVal.put(key, getBIRConstantVal(value))); return new ConstValue(mapConstVal, constValue.type); } return new ConstValue(constValue.value, constValue.type); } @Override public void visit(BLangImportPackage impPkg) { this.env.enclPkg.importModules.add(new BIRNode.BIRImportModule(impPkg.pos, impPkg.symbol.pkgID.orgName, impPkg.symbol.pkgID.name, impPkg.symbol.pkgID.version)); } @Override public void visit(BLangResourceFunction resourceFunction) { visit((BLangFunction) resourceFunction); } @Override public void visit(BLangFunction astFunc) { BInvokableType type = astFunc.symbol.getType(); boolean isTypeAttachedFunction = astFunc.flagSet.contains(Flag.ATTACHED) 
&& !typeDefs.containsKey(astFunc.receiver.getBType().tsymbol); Name workerName = names.fromIdNode(astFunc.defaultWorkerName); this.env.unlockVars.push(new BIRLockDetailsHolder()); BIRFunction birFunc; if (isTypeAttachedFunction) { Name funcName = names.fromString(astFunc.symbol.name.value); birFunc = new BIRFunction(astFunc.pos, funcName, names.fromString(astFunc.symbol.getOriginalName().value), astFunc.symbol.flags, type, workerName, astFunc.sendsToThis.size(), astFunc.symbol.origin.toBIROrigin()); } else { Name funcName = getFuncName(astFunc.symbol); birFunc = new BIRFunction(astFunc.pos, funcName, names.fromString(astFunc.symbol.getOriginalName().value), astFunc.symbol.flags, type, workerName, astFunc.sendsToThis.size(), astFunc.symbol.origin.toBIROrigin()); } this.currentScope = new BirScope(0, null); if (astFunc.receiver != null) { BIRFunctionParameter birVarDcl = new BIRFunctionParameter(astFunc.pos, astFunc.receiver.getBType(), this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.ARG, astFunc.receiver.name.value, false); this.env.symbolVarMap.put(astFunc.receiver.symbol, birVarDcl); } if (astFunc.receiver != null) { birFunc.receiver = getSelf(astFunc.receiver.symbol); } birFunc.setMarkdownDocAttachment(astFunc.symbol.markdownDocumentation); int i = 0; for (String channelName : astFunc.sendsToThis) { birFunc.workerChannels[i] = new BIRNode.ChannelDetails(channelName, astFunc.defaultWorkerName.value .equals(DEFAULT_WORKER_NAME), isWorkerSend(channelName, astFunc.defaultWorkerName.value)); i++; } if (astFunc.hasBody() && astFunc.body.getKind() == NodeKind.EXTERN_FUNCTION_BODY) { populateBIRAnnotAttachments(((BLangExternalFunctionBody) astFunc.body).annAttachments, birFunc.annotAttachments, this.env); } populateBIRAnnotAttachments(astFunc.annAttachments, birFunc.annotAttachments, this.env); populateBIRAnnotAttachments(astFunc.returnTypeAnnAttachments, birFunc.returnTypeAnnots, this.env); birFunc.argsCount = astFunc.requiredParams.size() + (astFunc.restParam != null ? 
1 : 0) + astFunc.paramClosureMap.size(); if (astFunc.flagSet.contains(Flag.ATTACHED) && typeDefs.containsKey(astFunc.receiver.getBType().tsymbol)) { typeDefs.get(astFunc.receiver.getBType().tsymbol).attachedFuncs.add(birFunc); } else { this.env.enclPkg.functions.add(birFunc); } this.env.enclFunc = birFunc; BType retType = unifier.build(astFunc.symbol.type.getReturnType()); birFunc.returnVariable = new BIRVariableDcl(astFunc.pos, retType, this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.RETURN, null); birFunc.localVars.add(0, birFunc.returnVariable); astFunc.paramClosureMap.forEach((k, v) -> addRequiredParam(birFunc, v, astFunc.pos)); astFunc.requiredParams.forEach(requiredParam -> addParam(birFunc, requiredParam)); if (astFunc.restParam != null) { addRestParam(birFunc, astFunc.restParam.symbol, astFunc.restParam.pos); } if (astFunc.interfaceFunction || Symbols.isNative(astFunc.symbol)) { this.env.clear(); return; } BIRBasicBlock entryBB = new BIRBasicBlock(this.env.nextBBId(names)); this.env.enclBasicBlocks = birFunc.basicBlocks; birFunc.basicBlocks.add(entryBB); this.env.enclBB = entryBB; addToTrapStack(entryBB); astFunc.body.accept(this); birFunc.basicBlocks.add(this.env.returnBB); BIRBasicBlock enclBB = this.env.enclBB; if (enclBB.instructions.size() == 0 && enclBB.terminator == null && this.env.returnBB != null) { enclBB.terminator = new BIRTerminator.GOTO(null, this.env.returnBB, this.currentScope); } this.env.clear(); this.env.unlockVars.clear(); birFunc.parameters.values().forEach(basicBlocks -> basicBlocks.forEach(bb -> bb.id = this.env.nextBBId(names))); birFunc.basicBlocks.forEach(bb -> bb.id = this.env.nextBBId(names)); birFunc.errorTable.sort(Comparator.comparingInt(o -> Integer.parseInt(o.trapBB.id.value.replace("bb", "")))); birFunc.dependentGlobalVars = astFunc.symbol.dependentGlobalVars.stream() .map(varSymbol -> this.globalVarMap.get(varSymbol)).collect(Collectors.toSet()); this.env.clear(); } private BIRVariableDcl getSelf(BSymbol receiver) { BIRVariableDcl self = this.env.symbolVarMap.get(receiver); if (self == null) { return new BIRVariableDcl(null, receiver.type, receiver.name, VarScope.FUNCTION, VarKind.SELF, null); } self.kind = VarKind.SELF; self.name = new Name("%self"); return self; } @Override public void visit(BLangBlockFunctionBody astBody) { BIRBasicBlock endLoopEndBB = this.env.enclLoopEndBB; BlockNode prevBlock = this.currentBlock; this.currentBlock = astBody; this.varDclsByBlock.computeIfAbsent(astBody, k -> new ArrayList<>()); for (BLangStatement astStmt : astBody.stmts) { astStmt.accept(this); } List<BIRVariableDcl> varDecls = this.varDclsByBlock.get(astBody); for (BIRVariableDcl birVariableDcl : varDecls) { birVariableDcl.endBB = this.env.enclBasicBlocks.get(this.env.enclBasicBlocks.size() - 1); } this.env.enclLoopEndBB = endLoopEndBB; this.currentBlock = prevBlock; } private BIRBasicBlock beginBreakableBlock(Location pos, BLangBlockStmt.FailureBreakMode mode) { BIRBasicBlock blockBB = new BIRBasicBlock(this.env.nextBBId(names)); addToTrapStack(blockBB); this.env.enclBasicBlocks.add(blockBB); this.env.enclBB.terminator = new BIRTerminator.GOTO(pos, blockBB, this.currentScope); BIRBasicBlock blockEndBB = new BIRBasicBlock(this.env.nextBBId(names)); addToTrapStack(blockEndBB); blockBB.terminator = new BIRTerminator.GOTO(pos, blockEndBB, this.currentScope); this.env.enclBB = blockBB; if (mode == BLangBlockStmt.FailureBreakMode.BREAK_WITHIN_BLOCK) { this.env.enclInnerOnFailEndBB = blockEndBB; } else { this.env.enclOnFailEndBB = blockEndBB; } 
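// A fresh lock-details frame is pushed so that a break/continue out of this block releases only locks acquired inside it.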
this.env.unlockVars.push(new BIRLockDetailsHolder()); return blockEndBB; } private void endBreakableBlock(BIRBasicBlock blockEndBB) { this.env.unlockVars.pop(); if (this.env.enclBB.terminator == null) { this.env.enclBB.terminator = new BIRTerminator.GOTO(null, blockEndBB, this.currentScope); } this.env.enclBasicBlocks.add(blockEndBB); this.env.enclBB = blockEndBB; } @Override public void visit(BLangAnnotationAttachment astAnnotAttach) { BIRAnnotationValue annotationValue; if (astAnnotAttach.expr == null) { annotationValue = new BIRNode.BIRAnnotationLiteralValue(symTable.booleanType, true); } else { if (!isCompileTimeAnnotationValue(astAnnotAttach.expr)) { return; } annotationValue = createAnnotationValue(astAnnotAttach.expr); } Name annotTagRef = this.names.fromIdNode(astAnnotAttach.annotationName); BIRAnnotationAttachment annotAttachment = new BIRAnnotationAttachment(astAnnotAttach.pos, annotTagRef); annotAttachment.packageID = astAnnotAttach.annotationSymbol.pkgID; annotAttachment.annotValues.add(annotationValue); this.env.enclAnnotAttachments.add(annotAttachment); } private boolean isCompileTimeAnnotationValue(BLangExpression expression) { BLangExpression expr = unwrapAnnotationExpressionFromCloneReadOnly(expression); switch (expr.getKind()) { case LITERAL: case NUMERIC_LITERAL: return true; case RECORD_LITERAL_EXPR: BLangRecordLiteral recordLiteral = (BLangRecordLiteral) expr; for (RecordLiteralNode.RecordField field : recordLiteral.fields) { if (!isCompileTimeAnnotationValue(((BLangRecordKeyValueField) field).valueExpr)) { return false; } } return true; case ARRAY_LITERAL_EXPR: BLangArrayLiteral arrayLiteral = (BLangArrayLiteral) expr; for (BLangExpression bLangExpr : arrayLiteral.exprs) { if (!isCompileTimeAnnotationValue(bLangExpr)) { return false; } } return true; case TYPE_CONVERSION_EXPR: return isCompileTimeAnnotationValue(((BLangTypeConversionExpr) expr).expr); case STATEMENT_EXPRESSION: BLangStatementExpression stmtExpr = (BLangStatementExpression) expr; List<BLangStatement> stmts = ((BLangBlockStmt) stmtExpr.stmt).stmts; if (!((BLangLocalVarRef) stmtExpr.expr).varSymbol.name.value.startsWith(DESUGARED_MAPPING_CONSTR_KEY)) { return false; } for (int i = 1; i < stmts.size(); i++) { BLangAssignment assignmentStmt = (BLangAssignment) stmts.get(i); if (!isCompileTimeAnnotationValue(((BLangIndexBasedAccess) assignmentStmt.varRef).indexExpr) || !isCompileTimeAnnotationValue(assignmentStmt.expr)) { return false; } } return true; default: return false; } } private BLangExpression unwrapAnnotationExpressionFromCloneReadOnly(BLangExpression expr) { if (expr.getKind() == INVOCATION) { BLangInvocation invocation = (BLangInvocation) expr; if (invocation.name.getValue().equals(CLONE_READ_ONLY)) { return invocation.expr; } } return expr; } private BIRAnnotationValue createAnnotationValue(BLangExpression expression) { BLangExpression expr = unwrapAnnotationExpressionFromCloneReadOnly(expression); switch (expr.getKind()) { case LITERAL: case NUMERIC_LITERAL: return createAnnotationLiteralValue((BLangLiteral) expr); case RECORD_LITERAL_EXPR: return createAnnotationRecordValue((BLangRecordLiteral) expr); case ARRAY_LITERAL_EXPR: return createAnnotationArrayValue((BLangArrayLiteral) expr); case TYPE_CONVERSION_EXPR: return createAnnotationValue(((BLangTypeConversionExpr) expr).expr); case STATEMENT_EXPRESSION: return createAnnotationRecordValue((BLangStatementExpression) expr); default: throw new IllegalStateException("Invalid annotation value expression kind: " + expr.getKind()); } } private 
BIRNode.BIRAnnotationRecordValue createAnnotationRecordValue(BLangRecordLiteral recordLiteral) { Map<String, BIRAnnotationValue> annotValueEntryMap = new HashMap<>(); for (RecordLiteralNode.RecordField field : recordLiteral.fields) { BLangRecordKeyValueField keyValuePair = (BLangRecordKeyValueField) field; BLangLiteral keyLiteral = (BLangLiteral) keyValuePair.key.expr; String entryKey = (String) keyLiteral.value; BIRAnnotationValue annotationValue = createAnnotationValue(keyValuePair.valueExpr); annotValueEntryMap.put(entryKey, annotationValue); } return new BIRNode.BIRAnnotationRecordValue(recordLiteral.getBType(), annotValueEntryMap); } private BIRNode.BIRAnnotationRecordValue createAnnotationRecordValue(BLangStatementExpression stmtExpr) { Map<String, BIRAnnotationValue> annotValueEntryMap = new HashMap<>(); List<BLangStatement> stmts = ((BLangBlockStmt) stmtExpr.stmt).stmts; for (int i = 1; i < stmts.size(); i++) { BLangAssignment assignmentStmt = (BLangAssignment) stmts.get(i); annotValueEntryMap.put( (String) ((BLangLiteral) ((BLangIndexBasedAccess) assignmentStmt.varRef).indexExpr).value, createAnnotationValue(assignmentStmt.expr)); } return new BIRNode.BIRAnnotationRecordValue(stmtExpr.getBType(), annotValueEntryMap); } private BIRNode.BIRAnnotationArrayValue createAnnotationArrayValue(BLangArrayLiteral arrayLiteral) { BIRAnnotationValue[] annotValues = new BIRAnnotationValue[arrayLiteral.exprs.size()]; for (int exprIndex = 0; exprIndex < arrayLiteral.exprs.size(); exprIndex++) { annotValues[exprIndex] = createAnnotationValue(arrayLiteral.exprs.get(exprIndex)); } return new BIRNode.BIRAnnotationArrayValue(arrayLiteral.getBType(), annotValues); } private BIRNode.BIRAnnotationLiteralValue createAnnotationLiteralValue(BLangLiteral literalValue) { return new BIRNode.BIRAnnotationLiteralValue(literalValue.getBType(), literalValue.value); } @Override public void visit(BLangAnnotation astAnnotation) { BAnnotationSymbol annSymbol = (BAnnotationSymbol) astAnnotation.symbol; BIRAnnotation birAnn = new BIRAnnotation(astAnnotation.pos, annSymbol.name, annSymbol.originalName, annSymbol.flags, annSymbol.points, annSymbol.attachedType == null ? 
symTable.trueType : annSymbol.attachedType, annSymbol.origin.toBIROrigin()); birAnn.setMarkdownDocAttachment(annSymbol.markdownDocumentation); this.env.enclPkg.annotations.add(birAnn); } private boolean isWorkerSend(String chnlName, String workerName) { return chnlName.split("->")[0].equals(workerName); } @Override public void visit(BLangLambdaFunction lambdaExpr) { BIRVariableDcl tempVarLambda = new BIRVariableDcl(lambdaExpr.pos, lambdaExpr.getBType(), this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP, null); this.env.enclFunc.localVars.add(tempVarLambda); BIROperand lhsOp = new BIROperand(tempVarLambda); Name funcName = getFuncName(lambdaExpr.function.symbol); List<BIRVariableDcl> params = new ArrayList<>(); lambdaExpr.function.requiredParams.forEach(param -> { BIRVariableDcl birVarDcl = new BIRVariableDcl(param.pos, param.symbol.type, this.env.nextLambdaVarId(names), VarScope.FUNCTION, VarKind.ARG, param.name.value); params.add(birVarDcl); }); BLangSimpleVariable restParam = lambdaExpr.function.restParam; if (restParam != null) { BIRVariableDcl birVarDcl = new BIRVariableDcl(restParam.pos, restParam.symbol.type, this.env.nextLambdaVarId(names), VarScope.FUNCTION, VarKind.ARG, null); params.add(birVarDcl); } setScopeAndEmit( new BIRNonTerminator.FPLoad(lambdaExpr.pos, lambdaExpr.function.symbol.pkgID, funcName, lhsOp, params, getClosureMapOperands(lambdaExpr), lambdaExpr.getBType(), lambdaExpr.function.symbol.strandName, lambdaExpr.function.symbol.schedulerPolicy)); this.env.targetOperand = lhsOp; } private List<BIROperand> getClosureMapOperands(BLangLambdaFunction lambdaExpr) { List<BIROperand> closureMaps = new ArrayList<>(); lambdaExpr.function.paramClosureMap.forEach((k, v) -> { BVarSymbol symbol = lambdaExpr.enclMapSymbols.get(k); if (symbol == null) { symbol = lambdaExpr.paramMapSymbolsOfEnclInvokable.get(k); } BIROperand varRef = new BIROperand(this.env.symbolVarMap.get(symbol)); closureMaps.add(varRef); }); return closureMaps; } private Name getFuncName(BInvokableSymbol symbol) { if (symbol.receiverSymbol == null) { return names.fromString(symbol.name.value); } int offset = symbol.receiverSymbol.type.tsymbol.name.value.length() + 1; String attachedFuncName = symbol.name.value; return names.fromString(attachedFuncName.substring(offset)); } private void addParam(BIRFunction birFunc, BLangVariable functionParam) { addParam(birFunc, functionParam.symbol, functionParam.expr, functionParam.pos); } private void addParam(BIRFunction birFunc, BVarSymbol paramSymbol, Location pos) { addParam(birFunc, paramSymbol, null, pos); } private void addParam(BIRFunction birFunc, BVarSymbol paramSymbol, BLangExpression defaultValExpr, Location pos) { BIRFunctionParameter birVarDcl = new BIRFunctionParameter(pos, paramSymbol.type, this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.ARG, paramSymbol.name.value, defaultValExpr != null); birFunc.localVars.add(birVarDcl); List<BIRBasicBlock> bbsOfDefaultValueExpr = new ArrayList<>(); if (defaultValExpr != null) { BIRBasicBlock defaultExprBB = new BIRBasicBlock(this.env.nextBBId(names)); bbsOfDefaultValueExpr.add(defaultExprBB); this.env.enclBB = defaultExprBB; this.env.enclBasicBlocks = bbsOfDefaultValueExpr; defaultValExpr.accept(this); BIROperand varRef = new BIROperand(birVarDcl); setScopeAndEmit(new Move(birFunc.pos, this.env.targetOperand, varRef)); this.env.enclBB.terminator = new BIRTerminator.Return(birFunc.pos); } BIRParameter parameter = new BIRParameter(pos, paramSymbol.name, paramSymbol.flags); 
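// Register the parameter in the signature, keep its default-value blocks, and map the symbol to its BIR variable.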
birFunc.requiredParams.add(parameter); birFunc.parameters.put(birVarDcl, bbsOfDefaultValueExpr); this.env.symbolVarMap.put(paramSymbol, birVarDcl); } private void addRestParam(BIRFunction birFunc, BVarSymbol paramSymbol, Location pos) { BIRFunctionParameter birVarDcl = new BIRFunctionParameter(pos, paramSymbol.type, this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.ARG, paramSymbol.name.value, false); birFunc.parameters.put(birVarDcl, new ArrayList<>()); birFunc.localVars.add(birVarDcl); birFunc.restParam = new BIRParameter(pos, paramSymbol.name, paramSymbol.flags); this.env.symbolVarMap.put(paramSymbol, birVarDcl); } private void addRequiredParam(BIRFunction birFunc, BVarSymbol paramSymbol, Location pos) { BIRFunctionParameter birVarDcl = new BIRFunctionParameter(pos, paramSymbol.type, this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.ARG, paramSymbol.name.value, false); birFunc.parameters.put(birVarDcl, new ArrayList<>()); birFunc.localVars.add(birVarDcl); BIRParameter parameter = new BIRParameter(pos, paramSymbol.name, paramSymbol.flags); birFunc.requiredParams.add(parameter); this.env.symbolVarMap.put(paramSymbol, birVarDcl); } @Override public void visit(BLangBlockStmt astBlockStmt) { BIRBasicBlock blockEndBB = null; BIRBasicBlock currentOnFailEndBB = this.env.enclOnFailEndBB; BIRBasicBlock currentWithinOnFailEndBB = this.env.enclInnerOnFailEndBB; BlockNode prevBlock = this.currentBlock; this.currentBlock = astBlockStmt; this.varDclsByBlock.computeIfAbsent(astBlockStmt, k -> new ArrayList<>()); if (astBlockStmt.failureBreakMode != BLangBlockStmt.FailureBreakMode.NOT_BREAKABLE) { blockEndBB = beginBreakableBlock(astBlockStmt.pos, astBlockStmt.failureBreakMode); } for (BLangStatement astStmt : astBlockStmt.stmts) { astStmt.accept(this); } if (astBlockStmt.failureBreakMode != BLangBlockStmt.FailureBreakMode.NOT_BREAKABLE) { endBreakableBlock(blockEndBB); } this.varDclsByBlock.get(astBlockStmt).forEach(birVariableDcl -> birVariableDcl.endBB = this.env.enclBasicBlocks.get(this.env.enclBasicBlocks.size() - 1) ); this.env.enclInnerOnFailEndBB = currentWithinOnFailEndBB; this.env.enclOnFailEndBB = currentOnFailEndBB; this.currentBlock = prevBlock; } @Override public void visit(BLangFail failNode) { if (failNode.expr == null) { if (this.env.enclInnerOnFailEndBB != null) { this.env.enclBB.terminator = new BIRTerminator.GOTO(failNode.pos, this.env.enclInnerOnFailEndBB, this.currentScope); } return; } BIRLockDetailsHolder toUnlock = this.env.unlockVars.peek(); if (!toUnlock.isEmpty()) { BIRBasicBlock goToBB = new BIRBasicBlock(this.env.nextBBId(names)); this.env.enclBasicBlocks.add(goToBB); this.env.enclBB.terminator = new BIRTerminator.GOTO(failNode.pos, goToBB, this.currentScope); this.env.enclBB = goToBB; } int numLocks = toUnlock.size(); while (numLocks > 0) { BIRBasicBlock unlockBB = new BIRBasicBlock(this.env.nextBBId(names)); this.env.enclBasicBlocks.add(unlockBB); BIRTerminator.Unlock unlock = new BIRTerminator.Unlock(null, unlockBB, this.currentScope); this.env.enclBB.terminator = unlock; unlock.relatedLock = toUnlock.getLock(numLocks - 1); this.env.enclBB = unlockBB; numLocks--; } BIRBasicBlock onFailBB = new BIRBasicBlock(this.env.nextBBId(names)); addToTrapStack(onFailBB); this.env.enclBasicBlocks.add(onFailBB); this.env.enclBB.terminator = new BIRTerminator.GOTO(failNode.pos, onFailBB, this.currentScope); this.env.enclBB = onFailBB; failNode.exprStmt.accept(this); if (this.env.enclBB.terminator == null) { this.env.enclBB.terminator = new 
BIRTerminator.GOTO(failNode.pos, this.env.enclOnFailEndBB, this.currentScope); } BIRBasicBlock ignoreBlock = new BIRBasicBlock(this.env.nextBBId(names)); addToTrapStack(ignoreBlock); ignoreBlock.terminator = new BIRTerminator.GOTO(failNode.pos, this.env.enclOnFailEndBB, this.currentScope); this.env.enclBasicBlocks.add(ignoreBlock); this.env.enclBB = ignoreBlock; } @Override public void visit(BLangSimpleVariableDef astVarDefStmt) { VarKind kind; if (astVarDefStmt.var.symbol.origin == SymbolOrigin.VIRTUAL) { kind = VarKind.SYNTHETIC; } else { kind = VarKind.LOCAL; } BIRVariableDcl birVarDcl = new BIRVariableDcl(astVarDefStmt.pos, astVarDefStmt.var.symbol.type, this.env.nextLocalVarId(names), VarScope.FUNCTION, kind, astVarDefStmt.var.name.value); birVarDcl.startBB = this.env.enclBB; this.varDclsByBlock.get(this.currentBlock).add(birVarDcl); this.env.enclFunc.localVars.add(birVarDcl); this.env.symbolVarMap.put(astVarDefStmt.var.symbol, birVarDcl); BirScope newScope = new BirScope(this.currentScope.id + 1, this.currentScope); birVarDcl.insScope = newScope; this.currentScope = newScope; if (astVarDefStmt.var.expr == null) { return; } astVarDefStmt.var.expr.accept(this); BIROperand varRef = new BIROperand(birVarDcl); setScopeAndEmit(new Move(astVarDefStmt.pos, this.env.targetOperand, varRef)); birVarDcl.insOffset = this.env.enclBB.instructions.size() - 1; } @Override public void visit(BLangSimpleVariable varNode) { String name = ANNOTATION_DATA.equals(varNode.symbol.name.value) ? ANNOTATION_DATA : varNode.name.value; String originalName = ANNOTATION_DATA.equals(varNode.symbol.getOriginalName().value) ? ANNOTATION_DATA : varNode.name.originalValue; BIRGlobalVariableDcl birVarDcl = new BIRGlobalVariableDcl(varNode.pos, varNode.symbol.flags, varNode.symbol.type, varNode.symbol.pkgID, names.fromString(name), names.fromString(originalName), VarScope.GLOBAL, VarKind.GLOBAL, varNode.name.value, varNode.symbol.origin.toBIROrigin()); birVarDcl.setMarkdownDocAttachment(varNode.symbol.markdownDocumentation); this.env.enclPkg.globalVars.add(birVarDcl); this.globalVarMap.put(varNode.symbol, birVarDcl); env.enclPkg.isListenerAvailable |= Symbols.isFlagOn(varNode.symbol.flags, Flags.LISTENER); } @Override public void visit(BLangAssignment astAssignStmt) { astAssignStmt.expr.accept(this); this.varAssignment = true; astAssignStmt.varRef.accept(this); this.varAssignment = false; } @Override public void visit(BLangExpressionStmt exprStmtNode) { exprStmtNode.expr.accept(this); if (this.env.returnBB == null && exprStmtNode.expr.getKind() == NodeKind.INVOCATION && types.isNeverTypeOrStructureTypeWithARequiredNeverMember(exprStmtNode.expr.getBType())) { BIRBasicBlock returnBB = new BIRBasicBlock(this.env.nextBBId(names)); returnBB.terminator = new BIRTerminator.Return(exprStmtNode.pos); this.env.returnBB = returnBB; } } @Override public void visit(BLangInvocation invocationExpr) { createCall(invocationExpr, false); } @Override public void visit(BLangInvocation.BLangActionInvocation actionInvocation) { createCall(actionInvocation, false); } @Override public void visit(BLangStatementExpression statementExpression) { statementExpression.stmt.accept(this); statementExpression.expr.accept(this); } @Override public void visit(BLangInvocation.BLangAttachedFunctionInvocation invocationExpr) { createCall(invocationExpr, true); } @Override public void visit(BLangInvocation.BFunctionPointerInvocation invocation) { invocation.functionPointerInvocation = true; createCall(invocation, false); } @Override public void 
visit(BLangForkJoin forkJoin) { forkJoin.workers.forEach(worker -> worker.accept(this)); } @Override public void visit(BLangWorkerReceive workerReceive) { BIRBasicBlock thenBB = new BIRBasicBlock(this.env.nextBBId(names)); addToTrapStack(thenBB); String channel = workerReceive.workerIdentifier.value + "->" + env.enclFunc.workerName.value; BIRVariableDcl tempVarDcl = new BIRVariableDcl(workerReceive.getBType(), this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand lhsOp = new BIROperand(tempVarDcl); this.env.targetOperand = lhsOp; boolean isOnSameStrand = DEFAULT_WORKER_NAME.equals(this.env.enclFunc.workerName.value); this.env.enclBB.terminator = new BIRTerminator.WorkerReceive(workerReceive.pos, names.fromString(channel), lhsOp, isOnSameStrand, thenBB, this.currentScope); this.env.enclBasicBlocks.add(thenBB); this.env.enclBB = thenBB; } @Override public void visit(BLangWorkerSend workerSend) { BIRBasicBlock thenBB = new BIRBasicBlock(this.env.nextBBId(names)); addToTrapStack(thenBB); this.env.enclBasicBlocks.add(thenBB); workerSend.expr.accept(this); String channelName = this.env.enclFunc.workerName.value + "->" + workerSend.workerIdentifier.value; boolean isOnSameStrand = DEFAULT_WORKER_NAME.equals(this.env.enclFunc.workerName.value); this.env.enclBB.terminator = new BIRTerminator.WorkerSend( workerSend.pos, names.fromString(channelName), this.env.targetOperand, isOnSameStrand, false, null, thenBB, this.currentScope); this.env.enclBB = thenBB; } @Override public void visit(BLangWorkerSyncSendExpr syncSend) { BIRBasicBlock thenBB = new BIRBasicBlock(this.env.nextBBId(names)); addToTrapStack(thenBB); syncSend.expr.accept(this); BIROperand dataOp = this.env.targetOperand; BIRVariableDcl tempVarDcl = new BIRVariableDcl(syncSend.receive.matchingSendsError, this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand lhsOp = new BIROperand(tempVarDcl); this.env.targetOperand = lhsOp; String channelName = this.env.enclFunc.workerName.value + "->" + syncSend.workerIdentifier.value; boolean isOnSameStrand = DEFAULT_WORKER_NAME.equals(this.env.enclFunc.workerName.value); this.env.enclBB.terminator = new BIRTerminator.WorkerSend( syncSend.pos, names.fromString(channelName), dataOp, isOnSameStrand, true, lhsOp, thenBB, this.currentScope); this.env.enclBasicBlocks.add(thenBB); this.env.enclBB = thenBB; } @Override public void visit(BLangWorkerFlushExpr flushExpr) { BIRBasicBlock thenBB = new BIRBasicBlock(this.env.nextBBId(names)); addToTrapStack(thenBB); BIRNode.ChannelDetails[] channels = new BIRNode.ChannelDetails[flushExpr.workerIdentifierList.size()]; int i = 0; for (BLangIdentifier workerIdentifier : flushExpr.workerIdentifierList) { String channelName = this.env.enclFunc.workerName.value + "->" + workerIdentifier.value; boolean isOnSameStrand = DEFAULT_WORKER_NAME.equals(this.env.enclFunc.workerName.value); channels[i] = new BIRNode.ChannelDetails(channelName, isOnSameStrand, true); i++; } BIRVariableDcl tempVarDcl = new BIRVariableDcl(flushExpr.getBType(), this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand lhsOp = new BIROperand(tempVarDcl); this.env.targetOperand = lhsOp; this.env.enclBB.terminator = new BIRTerminator.Flush(flushExpr.pos, channels, lhsOp, thenBB, this.currentScope); this.env.enclBasicBlocks.add(thenBB); this.env.enclBB = thenBB; } private void createWait(BLangWaitExpr waitExpr) { 
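// Lowers a wait expression: evaluates each waited future, collects the operands, and emits a WAIT terminator whose result lands in a temp variable.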
BIRBasicBlock thenBB = new BIRBasicBlock(this.env.nextBBId(names)); addToTrapStack(thenBB); List<BIROperand> exprList = new ArrayList<>(); waitExpr.exprList.forEach(expr -> { expr.accept(this); exprList.add(this.env.targetOperand); }); BIRVariableDcl tempVarDcl = new BIRVariableDcl(waitExpr.getBType(), this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand lhsOp = new BIROperand(tempVarDcl); this.env.targetOperand = lhsOp; this.env.enclBB.terminator = new BIRTerminator.Wait(waitExpr.pos, exprList, lhsOp, thenBB, this.currentScope); this.env.enclBasicBlocks.add(thenBB); this.env.enclBB = thenBB; } @Override public void visit(BLangErrorConstructorExpr errorConstructorExpr) { BIRVariableDcl tempVarError = new BIRVariableDcl(errorConstructorExpr.getBType(), this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarError); BIROperand lhsOp = new BIROperand(tempVarError); this.env.targetOperand = lhsOp; List<BLangExpression> positionalArgs = errorConstructorExpr.positionalArgs; positionalArgs.get(0).accept(this); BIROperand messageOp = this.env.targetOperand; positionalArgs.get(1).accept(this); BIROperand causeOp = this.env.targetOperand; errorConstructorExpr.errorDetail.accept(this); BIROperand detailsOp = this.env.targetOperand; BIRNonTerminator.NewError newError = new BIRNonTerminator.NewError(errorConstructorExpr.pos, errorConstructorExpr.getBType(), lhsOp, messageOp, causeOp, detailsOp); setScopeAndEmit(newError); this.env.targetOperand = lhsOp; } private void createCall(BLangInvocation invocationExpr, boolean isVirtual) { List<BLangExpression> requiredArgs = invocationExpr.requiredArgs; List<BLangExpression> restArgs = invocationExpr.restArgs; List<BIRArgument> args = new ArrayList<>(); boolean transactional = Symbols.isFlagOn(invocationExpr.symbol.flags, Flags.TRANSACTIONAL); for (BLangExpression requiredArg : requiredArgs) { if (requiredArg.getKind() == NodeKind.DYNAMIC_PARAM_EXPR) { ((BLangDynamicArgExpr) requiredArg).conditionalArgument.accept(this); BIROperand conditionalArg = this.env.targetOperand; ((BLangDynamicArgExpr) requiredArg).condition.accept(this); BIROperand condition = this.env.targetOperand; args.add(new BIRArgument(ArgumentState.CONDITIONALLY_PROVIDED, conditionalArg.variableDcl, condition)); } else if (requiredArg.getKind() != NodeKind.IGNORE_EXPR) { requiredArg.accept(this); args.add(new BIRArgument(ArgumentState.PROVIDED, this.env.targetOperand.variableDcl)); } else { BIRVariableDcl birVariableDcl = new BIRVariableDcl(requiredArg.getBType(), new Name("_"), VarScope.FUNCTION, VarKind.ARG); birVariableDcl.ignoreVariable = true; args.add(new BIRArgument(ArgumentState.NOT_PROVIDED, birVariableDcl)); } } for (BLangExpression arg : restArgs) { arg.accept(this); args.add(new BIRArgument(ArgumentState.PROVIDED, this.env.targetOperand.variableDcl)); } BIROperand fp = null; if (invocationExpr.functionPointerInvocation) { invocationExpr.expr.accept(this); fp = this.env.targetOperand; } BIRVariableDcl tempVarDcl = new BIRVariableDcl(invocationExpr.getBType(), this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand lhsOp = new BIROperand(tempVarDcl); this.env.targetOperand = lhsOp; BIRBasicBlock thenBB = new BIRBasicBlock(this.env.nextBBId(names)); addToTrapStack(thenBB); this.env.enclBasicBlocks.add(thenBB); if (invocationExpr.functionPointerInvocation) { this.env.enclBB.terminator = new 
BIRTerminator.FPCall(invocationExpr.pos, InstructionKind.FP_CALL, fp, args, lhsOp, invocationExpr.async, transactional, thenBB, this.currentScope); } else if (invocationExpr.async) { BInvokableSymbol bInvokableSymbol = (BInvokableSymbol) invocationExpr.symbol; List<BIRAnnotationAttachment> calleeAnnots = getStatementAnnotations(bInvokableSymbol.annAttachments, this.env); List<BIRAnnotationAttachment> annots = getStatementAnnotations(invocationExpr.annAttachments, this.env); this.env.enclBB.terminator = new BIRTerminator.AsyncCall(invocationExpr.pos, InstructionKind.ASYNC_CALL, isVirtual, invocationExpr.symbol.pkgID, getFuncName((BInvokableSymbol) invocationExpr.symbol), args, lhsOp, thenBB, annots, calleeAnnots, bInvokableSymbol.getFlags(), this.currentScope); } else { BInvokableSymbol bInvokableSymbol = (BInvokableSymbol) invocationExpr.symbol; List<BIRAnnotationAttachment> calleeAnnots = getStatementAnnotations(bInvokableSymbol.annAttachments, this.env); this.env.enclBB.terminator = new BIRTerminator.Call(invocationExpr.pos, InstructionKind.CALL, isVirtual, invocationExpr.symbol.pkgID, getFuncName((BInvokableSymbol) invocationExpr.symbol), args, lhsOp, thenBB, calleeAnnots, bInvokableSymbol.getFlags(), this.currentScope); } this.env.enclBB = thenBB; } @Override public void visit(BLangReturn astReturnStmt) { astReturnStmt.expr.accept(this); BIROperand retVarRef = new BIROperand(this.env.enclFunc.returnVariable); setScopeAndEmit(new Move(astReturnStmt.pos, this.env.targetOperand, retVarRef)); if (this.env.returnBB == null) { BIRBasicBlock returnBB = new BIRBasicBlock(this.env.nextBBId(names)); addToTrapStack(returnBB); returnBB.terminator = new BIRTerminator.Return(astReturnStmt.pos); this.env.returnBB = returnBB; } if (this.env.enclBB.terminator == null) { this.env.unlockVars.forEach(s -> { int i = s.size(); while (i > 0) { BIRBasicBlock unlockBB = new BIRBasicBlock(this.env.nextBBId(names)); this.env.enclBasicBlocks.add(unlockBB); BIRTerminator.Unlock unlock = new BIRTerminator.Unlock(null, unlockBB, this.currentScope); this.env.enclBB.terminator = unlock; unlock.relatedLock = s.getLock(i - 1); this.env.enclBB = unlockBB; i--; } }); this.env.enclBB.terminator = new BIRTerminator.GOTO(astReturnStmt.pos, this.env.returnBB, this.currentScope); BIRBasicBlock nextBB = new BIRBasicBlock(this.env.nextBBId(names)); this.env.enclBasicBlocks.add(nextBB); this.env.enclBB = nextBB; addToTrapStack(nextBB); } } @Override public void visit(BLangPanic panicNode) { panicNode.expr.accept(this); if (this.env.returnBB == null) { BIRBasicBlock returnBB = new BIRBasicBlock(this.env.nextBBId(names)); addToTrapStack(returnBB); returnBB.terminator = new BIRTerminator.Return(panicNode.pos); this.env.returnBB = returnBB; } this.env.enclBB.terminator = new BIRTerminator.Panic(panicNode.pos, this.env.targetOperand, this.currentScope); BIRBasicBlock unlockBB = new BIRBasicBlock(this.env.nextBBId(names)); addToTrapStack(unlockBB); this.env.enclBasicBlocks.add(unlockBB); this.env.enclBB = unlockBB; } @Override public void visit(BLangIf astIfStmt) { astIfStmt.expr.accept(this); BIROperand ifExprResult = this.env.targetOperand; BIRBasicBlock thenBB = new BIRBasicBlock(this.env.nextBBId(names)); addToTrapStack(thenBB); this.env.enclBasicBlocks.add(thenBB); BIRBasicBlock nextBB = new BIRBasicBlock(this.env.nextBBId(names)); BIRTerminator.Branch branchIns = new BIRTerminator.Branch(astIfStmt.pos, ifExprResult, thenBB, null, this.currentScope); this.env.enclBB.terminator = branchIns; this.env.enclBB = thenBB; 
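// Generate the then-block; fall through to nextBB unless the block already ends with a terminator.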
astIfStmt.body.accept(this); if (this.env.enclBB.terminator == null) { this.env.enclBB.terminator = new BIRTerminator.GOTO(null, nextBB, this.currentScope); } if (astIfStmt.elseStmt != null) { BIRBasicBlock elseBB = new BIRBasicBlock(this.env.nextBBId(names)); addToTrapStack(elseBB); this.env.enclBasicBlocks.add(elseBB); branchIns.falseBB = elseBB; this.env.enclBB = elseBB; astIfStmt.elseStmt.accept(this); if (this.env.enclBB.terminator == null) { this.env.enclBB.terminator = new BIRTerminator.GOTO(null, nextBB, this.currentScope); } } else { branchIns.falseBB = nextBB; } addToTrapStack(nextBB); this.env.enclBasicBlocks.add(nextBB); this.env.enclBB = nextBB; } @Override public void visit(BLangWhile astWhileStmt) { BIRBasicBlock currentEnclLoopBB = this.env.enclLoopBB; BIRBasicBlock currentEnclLoopEndBB = this.env.enclLoopEndBB; BIRBasicBlock whileExprBB = new BIRBasicBlock(this.env.nextBBId(names)); addToTrapStack(whileExprBB); this.env.enclBasicBlocks.add(whileExprBB); this.env.enclBB.terminator = new BIRTerminator.GOTO(astWhileStmt.pos, whileExprBB, this.currentScope); this.env.enclBB = whileExprBB; astWhileStmt.expr.accept(this); BIROperand whileExprResult = this.env.targetOperand; BIRBasicBlock whileBodyBB = new BIRBasicBlock(this.env.nextBBId(names)); addToTrapStack(whileBodyBB); this.env.enclBasicBlocks.add(whileBodyBB); BIRBasicBlock whileEndBB = new BIRBasicBlock(this.env.nextBBId(names)); addToTrapStack(whileEndBB); this.env.enclBB.terminator = new BIRTerminator.Branch(astWhileStmt.pos, whileExprResult, whileBodyBB, whileEndBB, this.currentScope); this.env.enclBB = whileBodyBB; this.env.enclLoopBB = whileExprBB; this.env.enclLoopEndBB = whileEndBB; this.env.unlockVars.push(new BIRLockDetailsHolder()); astWhileStmt.body.accept(this); this.env.unlockVars.pop(); if (this.env.enclBB.terminator == null) { this.env.enclBB.terminator = new BIRTerminator.GOTO(null, whileExprBB, this.currentScope); } this.env.enclBasicBlocks.add(whileEndBB); this.env.enclBB = whileEndBB; this.env.enclLoopBB = currentEnclLoopBB; this.env.enclLoopEndBB = currentEnclLoopEndBB; } @Override public void visit(BLangIgnoreExpr ignoreExpr) { BIRVariableDcl tempVarDcl = new BIRVariableDcl(ignoreExpr.getBType(), this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); } @Override public void visit(BLangLiteral astLiteralExpr) { BIRVariableDcl tempVarDcl = new BIRVariableDcl(astLiteralExpr.getBType(), this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand toVarRef = new BIROperand(tempVarDcl); setScopeAndEmit(new BIRNonTerminator.ConstantLoad(astLiteralExpr.pos, astLiteralExpr.value, astLiteralExpr.getBType(), toVarRef)); this.env.targetOperand = toVarRef; } @Override public void visit(BLangMapLiteral astMapLiteralExpr) { visitTypedesc(astMapLiteralExpr.pos, astMapLiteralExpr.getBType(), Collections.emptyList()); BIRVariableDcl tempVarDcl = new BIRVariableDcl(astMapLiteralExpr.getBType(), this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand toVarRef = new BIROperand(tempVarDcl); setScopeAndEmit(new BIRNonTerminator.NewStructure(astMapLiteralExpr.pos, toVarRef, this.env.targetOperand, generateMappingConstructorEntries(astMapLiteralExpr.fields))); this.env.targetOperand = toVarRef; } @Override public void visit(BLangTypeConversionExpr astTypeConversionExpr) { BIRVariableDcl tempVarDcl = new 
BIRVariableDcl(astTypeConversionExpr.getBType(), this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand toVarRef = new BIROperand(tempVarDcl); astTypeConversionExpr.expr.accept(this); BIROperand rhsOp = this.env.targetOperand; setScopeAndEmit( new BIRNonTerminator.TypeCast(astTypeConversionExpr.pos, toVarRef, rhsOp, toVarRef.variableDcl.type, astTypeConversionExpr.checkTypes)); this.env.targetOperand = toVarRef; } @Override public void visit(BLangStructLiteral astStructLiteralExpr) { List<BIROperand> varDcls = mapToVarDcls(astStructLiteralExpr.enclMapSymbols); visitTypedesc(astStructLiteralExpr.pos, astStructLiteralExpr.getBType(), varDcls); BIRVariableDcl tempVarDcl = new BIRVariableDcl(astStructLiteralExpr.getBType(), this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand toVarRef = new BIROperand(tempVarDcl); BIRNonTerminator.NewStructure instruction = new BIRNonTerminator.NewStructure(astStructLiteralExpr.pos, toVarRef, this.env.targetOperand, generateMappingConstructorEntries(astStructLiteralExpr.fields)); setScopeAndEmit(instruction); this.env.targetOperand = toVarRef; } private List<BIROperand> mapToVarDcls(TreeMap<Integer, BVarSymbol> enclMapSymbols) { if (enclMapSymbols == null || enclMapSymbols.size() == 0) { return Collections.emptyList(); } ArrayList<BIROperand> varDcls = new ArrayList<>(enclMapSymbols.size()); for (BVarSymbol varSymbol : enclMapSymbols.values()) { BIRVariableDcl varDcl = this.env.symbolVarMap.get(varSymbol); varDcls.add(new BIROperand(varDcl)); } return varDcls; } @Override public void visit(BLangTypeInit connectorInitExpr) { BIRVariableDcl tempVarDcl = new BIRVariableDcl(connectorInitExpr.getBType(), this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand toVarRef = new BIROperand(tempVarDcl); BTypeSymbol objectTypeSymbol = getObjectTypeSymbol(connectorInitExpr.getBType()); BIRNonTerminator.NewInstance instruction; if (isInSamePackage(objectTypeSymbol, env.enclPkg.packageID)) { BIRTypeDefinition def = typeDefs.get(objectTypeSymbol); instruction = new BIRNonTerminator.NewInstance(connectorInitExpr.pos, def, toVarRef); } else { BType connectorInitExprType = types.getReferredType(connectorInitExpr.getBType()); BType objectType = connectorInitExprType.tag != TypeTags.UNION ? 
connectorInitExprType : ((BUnionType) connectorInitExprType).getMemberTypes().stream() .filter(bType -> bType.tag != TypeTags.ERROR) .findFirst() .get(); String objectName = objectType.tsymbol.name.value; instruction = new BIRNonTerminator.NewInstance(connectorInitExpr.pos, objectTypeSymbol.pkgID, objectName, toVarRef); } setScopeAndEmit(instruction); this.env.targetOperand = toVarRef; } private boolean isInSamePackage(BSymbol objectTypeSymbol, PackageID packageID) { return objectTypeSymbol.pkgID.equals(packageID); } @Override public void visit(BLangSimpleVarRef.BLangFieldVarRef fieldVarRef) { } @Override public void visit(BLangArrayLiteral astArrayLiteralExpr) { generateListConstructorExpr(astArrayLiteralExpr); } @Override public void visit(BLangTupleLiteral tupleLiteral) { generateListConstructorExpr(tupleLiteral); } @Override public void visit(BLangGroupExpr groupExpr) { groupExpr.expression.accept(this); } @Override public void visit(BLangJSONArrayLiteral jsonArrayLiteralExpr) { generateListConstructorExpr(jsonArrayLiteralExpr); } @Override public void visit(BLangMapAccessExpr astMapAccessExpr) { boolean variableStore = this.varAssignment; this.varAssignment = false; BIROperand rhsOp = this.env.targetOperand; astMapAccessExpr.expr.accept(this); BIROperand varRefRegIndex = this.env.targetOperand; astMapAccessExpr.indexExpr.accept(this); BIROperand keyRegIndex = this.env.targetOperand; if (variableStore) { setScopeAndEmit( new BIRNonTerminator.FieldAccess(astMapAccessExpr.pos, InstructionKind.MAP_STORE, varRefRegIndex, keyRegIndex, rhsOp, astMapAccessExpr.isStoreOnCreation)); return; } BIRVariableDcl tempVarDcl = new BIRVariableDcl(astMapAccessExpr.getBType(), this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand tempVarRef = new BIROperand(tempVarDcl); setScopeAndEmit(new BIRNonTerminator.FieldAccess(astMapAccessExpr.pos, InstructionKind.MAP_LOAD, tempVarRef, keyRegIndex, varRefRegIndex, astMapAccessExpr.optionalFieldAccess, astMapAccessExpr.isLValue && !astMapAccessExpr.leafNode)); this.env.targetOperand = tempVarRef; this.varAssignment = variableStore; } @Override public void visit(BLangTableAccessExpr astTableAccessExpr) { boolean variableStore = this.varAssignment; this.varAssignment = false; BIROperand rhsOp = this.env.targetOperand; astTableAccessExpr.expr.accept(this); BIROperand varRefRegIndex = this.env.targetOperand; astTableAccessExpr.indexExpr.accept(this); BIROperand keyRegIndex = this.env.targetOperand; if (variableStore) { setScopeAndEmit(new BIRNonTerminator.FieldAccess(astTableAccessExpr.pos, InstructionKind.TABLE_STORE, varRefRegIndex, keyRegIndex, rhsOp)); return; } BIRVariableDcl tempVarDcl = new BIRVariableDcl(astTableAccessExpr.getBType(), this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand tempVarRef = new BIROperand(tempVarDcl); setScopeAndEmit(new BIRNonTerminator.FieldAccess(astTableAccessExpr.pos, InstructionKind.TABLE_LOAD, tempVarRef, keyRegIndex, varRefRegIndex)); this.env.targetOperand = tempVarRef; this.varAssignment = variableStore; } @Override public void visit(BLangStructFieldAccessExpr astStructFieldAccessExpr) { generateMappingAccess(astStructFieldAccessExpr, astStructFieldAccessExpr.optionalFieldAccess); } @Override public void visit(BLangJSONAccessExpr astJSONFieldAccessExpr) { if (astJSONFieldAccessExpr.indexExpr.getBType().tag == TypeTags.INT) { generateArrayAccess(astJSONFieldAccessExpr); return; } 
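// Non-integer index: lower the JSON access as a mapping (field) access instead.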
generateMappingAccess(astJSONFieldAccessExpr, astJSONFieldAccessExpr.optionalFieldAccess); } @Override public void visit(BLangDynamicArgExpr dynamicParamExpr) { dynamicParamExpr.condition.accept(this); dynamicParamExpr.conditionalArgument.accept(this); } @Override public void visit(BLangStringAccessExpr stringAccessExpr) { BIRVariableDcl tempVarDcl = new BIRVariableDcl(stringAccessExpr.getBType(), this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand tempVarRef = new BIROperand(tempVarDcl); stringAccessExpr.expr.accept(this); BIROperand varRefRegIndex = this.env.targetOperand; stringAccessExpr.indexExpr.accept(this); BIROperand keyRegIndex = this.env.targetOperand; setScopeAndEmit(new BIRNonTerminator.FieldAccess(stringAccessExpr.pos, InstructionKind.STRING_LOAD, tempVarRef, keyRegIndex, varRefRegIndex)); this.env.targetOperand = tempVarRef; } @Override public void visit(BLangArrayAccessExpr astArrayAccessExpr) { generateArrayAccess(astArrayAccessExpr); } @Override public void visit(BLangIndexBasedAccess.BLangTupleAccessExpr tupleAccessExpr) { generateArrayAccess(tupleAccessExpr); } @Override public void visit(BLangIsLikeExpr isLikeExpr) { BIRVariableDcl tempVarDcl = new BIRVariableDcl(symTable.booleanType, this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand toVarRef = new BIROperand(tempVarDcl); isLikeExpr.expr.accept(this); BIROperand exprIndex = this.env.targetOperand; setScopeAndEmit(new BIRNonTerminator.IsLike(isLikeExpr.pos, isLikeExpr.typeNode.getBType(), toVarRef, exprIndex)); this.env.targetOperand = toVarRef; } @Override public void visit(BLangTypeTestExpr typeTestExpr) { BIRVariableDcl tempVarDcl = new BIRVariableDcl(symTable.booleanType, this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand toVarRef = new BIROperand(tempVarDcl); typeTestExpr.expr.accept(this); BIROperand exprIndex = this.env.targetOperand; setScopeAndEmit( new BIRNonTerminator.TypeTest(typeTestExpr.pos, typeTestExpr.typeNode.getBType(), toVarRef, exprIndex)); this.env.targetOperand = toVarRef; } @Override public void visit(BLangLocalVarRef astVarRefExpr) { boolean variableStore = this.varAssignment; this.varAssignment = false; BSymbol varSymbol = astVarRefExpr.symbol; if (variableStore) { if (astVarRefExpr.symbol.name != Names.IGNORE) { BIROperand varRef = new BIROperand(this.env.symbolVarMap.get(varSymbol)); setScopeAndEmit(new Move(astVarRefExpr.pos, this.env.targetOperand, varRef)); } } else { BIRVariableDcl tempVarDcl = new BIRVariableDcl(varSymbol.type, this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand tempVarRef = new BIROperand(tempVarDcl); BIRVariableDcl varDecl = this.env.symbolVarMap.get(varSymbol); BIROperand fromVarRef = new BIROperand(varDecl); setScopeAndEmit(new Move(astVarRefExpr.pos, fromVarRef, tempVarRef)); this.env.targetOperand = tempVarRef; } this.varAssignment = variableStore; } @Override public void visit(BLangPackageVarRef astPackageVarRefExpr) { boolean variableStore = this.varAssignment; this.varAssignment = false; if (variableStore) { if (astPackageVarRefExpr.symbol.name != Names.IGNORE) { BIROperand varRef = new BIROperand(getVarRef(astPackageVarRefExpr)); setScopeAndEmit(new Move(astPackageVarRefExpr.pos, this.env.targetOperand, varRef)); } } else { BIRVariableDcl tempVarDcl = new 
BIRVariableDcl(astPackageVarRefExpr.getBType(), this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand tempVarRef = new BIROperand(tempVarDcl); BIROperand fromVarRef = new BIROperand(getVarRef(astPackageVarRefExpr)); setScopeAndEmit(new Move(astPackageVarRefExpr.pos, fromVarRef, tempVarRef)); this.env.targetOperand = tempVarRef; } this.varAssignment = variableStore; } private BIRGlobalVariableDcl getVarRef(BLangPackageVarRef astPackageVarRefExpr) { BSymbol symbol = astPackageVarRefExpr.symbol; if ((symbol.tag & SymTag.CONSTANT) == SymTag.CONSTANT || !isInSamePackage(astPackageVarRefExpr.varSymbol, env.enclPkg.packageID)) { return new BIRGlobalVariableDcl(astPackageVarRefExpr.pos, symbol.flags, symbol.type, symbol.pkgID, symbol.name, symbol.getOriginalName(), VarScope.GLOBAL, VarKind.CONSTANT, symbol.name.value, symbol.origin.toBIROrigin()); } return this.globalVarMap.get(symbol); } @Override public void visit(BLangBinaryExpr astBinaryExpr) { astBinaryExpr.lhsExpr.accept(this); BIROperand rhsOp1 = this.env.targetOperand; astBinaryExpr.rhsExpr.accept(this); BIROperand rhsOp2 = this.env.targetOperand; BIRVariableDcl tempVarDcl = new BIRVariableDcl(astBinaryExpr.getBType(), this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand lhsOp = new BIROperand(tempVarDcl); this.env.targetOperand = lhsOp; BinaryOp binaryIns = new BinaryOp(astBinaryExpr.pos, getBinaryInstructionKind(astBinaryExpr.opKind), astBinaryExpr.getBType(), lhsOp, rhsOp1, rhsOp2); setScopeAndEmit(binaryIns); } @Override public void visit(BLangUnaryExpr unaryExpr) { unaryExpr.expr.accept(this); BIROperand rhsOp = this.env.targetOperand; BIRVariableDcl tempVarDcl = new BIRVariableDcl(unaryExpr.getBType(), this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand lhsOp = new BIROperand(tempVarDcl); if (OperatorKind.ADD.equals(unaryExpr.operator) || OperatorKind.UNTAINT.equals(unaryExpr.operator)) { setScopeAndEmit(new Move(unaryExpr.pos, rhsOp, lhsOp)); this.env.targetOperand = lhsOp; return; } UnaryOP unaryIns = new UnaryOP(unaryExpr.pos, getUnaryInstructionKind(unaryExpr.operator), lhsOp, rhsOp); setScopeAndEmit(unaryIns); this.env.targetOperand = lhsOp; } @Override public void visit(BLangTrapExpr trapExpr) { BIRBasicBlock trapBB = new BIRBasicBlock(this.env.nextBBId(names)); this.env.enclBasicBlocks.add(trapBB); this.env.enclBB.terminator = new BIRTerminator.GOTO(trapExpr.pos, trapBB, this.currentScope); this.env.enclBB = trapBB; this.env.trapBlocks.push(new ArrayList<>()); addToTrapStack(trapBB); trapExpr.expr.accept(this); List<BIRBasicBlock> trappedBlocks = this.env.trapBlocks.pop(); BIRBasicBlock nextBB = new BIRBasicBlock(this.env.nextBBId(names)); addToTrapStack(nextBB); env.enclBasicBlocks.add(nextBB); if (this.env.enclBB.terminator == null) { this.env.enclBB.terminator = new BIRTerminator.GOTO(trapExpr.pos, nextBB, this.currentScope); } env.enclFunc.errorTable.add(new BIRNode.BIRErrorEntry(trappedBlocks.get(0), trappedBlocks.get(trappedBlocks.size() - 1), env.targetOperand, nextBB)); this.env.enclBB = nextBB; } @Override public void visit(BLangWaitExpr waitExpr) { createWait(waitExpr); } @Override public void visit(BLangWaitForAllExpr.BLangWaitLiteral waitLiteral) { visitTypedesc(waitLiteral.pos, waitLiteral.getBType(), Collections.emptyList()); BIRBasicBlock thenBB = new BIRBasicBlock(this.env.nextBBId(names)); 
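// Wait-all literal: build the result mapping, then collect one future operand per key for the WaitAll terminator.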
addToTrapStack(thenBB); BIRVariableDcl tempVarDcl = new BIRVariableDcl(waitLiteral.getBType(), this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand toVarRef = new BIROperand(tempVarDcl); setScopeAndEmit(new BIRNonTerminator.NewStructure(waitLiteral.pos, toVarRef, this.env.targetOperand)); this.env.targetOperand = toVarRef; List<String> keys = new ArrayList<>(); List<BIROperand> valueExprs = new ArrayList<>(); for (BLangWaitForAllExpr.BLangWaitKeyValue keyValue : waitLiteral.keyValuePairs) { keys.add(keyValue.key.value); BLangExpression expr = keyValue.valueExpr != null ? keyValue.valueExpr : keyValue.keyExpr; expr.accept(this); BIROperand valueRegIndex = this.env.targetOperand; valueExprs.add(valueRegIndex); } this.env.enclBB.terminator = new BIRTerminator.WaitAll(waitLiteral.pos, toVarRef, keys, valueExprs, thenBB, this.currentScope); this.env.targetOperand = toVarRef; this.env.enclFunc.basicBlocks.add(thenBB); this.env.enclBB = thenBB; } @Override public void visit(BLangIsAssignableExpr assignableExpr) { BIRVariableDcl tempVarDcl = new BIRVariableDcl(symTable.booleanType, this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand toVarRef = new BIROperand(tempVarDcl); assignableExpr.lhsExpr.accept(this); BIROperand exprIndex = this.env.targetOperand; setScopeAndEmit( new BIRNonTerminator.TypeTest(assignableExpr.pos, assignableExpr.targetType, toVarRef, exprIndex)); this.env.targetOperand = toVarRef; } @Override public void visit(BLangXMLQName xmlQName) { BIRVariableDcl tempVarDcl = new BIRVariableDcl(symTable.anyType, this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand toVarRef = new BIROperand(tempVarDcl); if (!xmlQName.isUsedInXML) { String qName = xmlQName.namespaceURI == null ? 
xmlQName.localname.value : ("{" + xmlQName.namespaceURI + "}" + xmlQName.localname); generateStringLiteral(qName); return; } BIROperand nsURIIndex = generateStringLiteral(xmlQName.namespaceURI); BIROperand localnameIndex = generateStringLiteral(xmlQName.localname.value); BIROperand prefixIndex = generateStringLiteral(xmlQName.prefix.value); BIRNonTerminator.NewXMLQName newXMLQName = new BIRNonTerminator.NewXMLQName(xmlQName.pos, toVarRef, localnameIndex, nsURIIndex, prefixIndex); setScopeAndEmit(newXMLQName); this.env.targetOperand = toVarRef; } @Override public void visit(BLangXMLElementLiteral xmlElementLiteral) { BIRVariableDcl tempVarDcl = new BIRVariableDcl(xmlElementLiteral.getBType(), this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand toVarRef = new BIROperand(tempVarDcl); xmlElementLiteral.inlineNamespaces.forEach(xmlns -> xmlns.accept(this)); BLangExpression startTagName = (BLangExpression) xmlElementLiteral.getStartTagName(); startTagName.accept(this); BIROperand startTagNameIndex = this.env.targetOperand; BIROperand defaultNsURIVarRef = generateNamespaceRef(xmlElementLiteral.defaultNsSymbol, xmlElementLiteral.pos); BIRNonTerminator.NewXMLElement newXMLElement = new BIRNonTerminator.NewXMLElement(xmlElementLiteral.pos, toVarRef, startTagNameIndex, defaultNsURIVarRef, Symbols.isFlagOn(xmlElementLiteral.getBType().flags, Flags.READONLY)); setScopeAndEmit(newXMLElement); populateXML(xmlElementLiteral, toVarRef); this.env.targetOperand = toVarRef; } @Override public void visit(BLangXMLAttribute attribute) { BIROperand xmlVarRef = this.env.targetOperand; attribute.name.accept(this); BIROperand attrNameOp = this.env.targetOperand; attribute.value.accept(this); BIROperand attrValueOp = this.env.targetOperand; setScopeAndEmit(new BIRNonTerminator.FieldAccess(attribute.pos, InstructionKind.XML_ATTRIBUTE_STORE, xmlVarRef, attrNameOp, attrValueOp)); } @Override public void visit(BLangXMLSequenceLiteral xmlSequenceLiteral) { BIRVariableDcl tempVarDcl = new BIRVariableDcl(xmlSequenceLiteral.getBType(), this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand toVarRef = new BIROperand(tempVarDcl); BIRNonTerminator.NewXMLSequence newXMLSequence = new BIRNonTerminator.NewXMLSequence(xmlSequenceLiteral.pos, toVarRef); setScopeAndEmit(newXMLSequence); populateXMLSequence(xmlSequenceLiteral, toVarRef); this.env.targetOperand = toVarRef; } @Override public void visit(BLangXMLTextLiteral xmlTextLiteral) { BIRVariableDcl tempVarDcl = new BIRVariableDcl(xmlTextLiteral.getBType(), this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand toVarRef = new BIROperand(tempVarDcl); xmlTextLiteral.concatExpr.accept(this); BIROperand xmlTextIndex = this.env.targetOperand; BIRNonTerminator.NewXMLText newXMLElement = new BIRNonTerminator.NewXMLText(xmlTextLiteral.pos, toVarRef, xmlTextIndex); setScopeAndEmit(newXMLElement); this.env.targetOperand = toVarRef; } @Override public void visit(BLangXMLCommentLiteral xmlCommentLiteral) { BIRVariableDcl tempVarDcl = new BIRVariableDcl(xmlCommentLiteral.getBType(), this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand toVarRef = new BIROperand(tempVarDcl); xmlCommentLiteral.concatExpr.accept(this); BIROperand xmlCommentIndex = this.env.targetOperand; BIRNonTerminator.NewXMLComment newXMLComment = new 
BIRNonTerminator.NewXMLComment(xmlCommentLiteral.pos, toVarRef, xmlCommentIndex, Symbols.isFlagOn(xmlCommentLiteral.getBType().flags, Flags.READONLY)); setScopeAndEmit(newXMLComment); this.env.targetOperand = toVarRef; } @Override public void visit(BLangXMLProcInsLiteral xmlProcInsLiteral) { BIRVariableDcl tempVarDcl = new BIRVariableDcl(xmlProcInsLiteral.getBType(), this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand toVarRef = new BIROperand(tempVarDcl); xmlProcInsLiteral.dataConcatExpr.accept(this); BIROperand dataIndex = this.env.targetOperand; xmlProcInsLiteral.target.accept(this); BIROperand targetIndex = this.env.targetOperand; BIRNonTerminator.NewXMLProcIns newXMLProcIns = new BIRNonTerminator.NewXMLProcIns(xmlProcInsLiteral.pos, toVarRef, dataIndex, targetIndex, Symbols.isFlagOn(xmlProcInsLiteral.getBType().flags, Flags.READONLY)); setScopeAndEmit(newXMLProcIns); this.env.targetOperand = toVarRef; } @Override public void visit(BLangXMLQuotedString xmlQuotedString) { xmlQuotedString.concatExpr.accept(this); } @Override public void visit(BLangXMLNSStatement xmlnsStmtNode) { xmlnsStmtNode.xmlnsDecl.accept(this); } @Override public void visit(BLangXMLNS xmlnsNode) { } @Override public void visit(BLangLocalXMLNS xmlnsNode) { generateXMLNamespace(xmlnsNode); } @Override public void visit(BLangPackageXMLNS xmlnsNode) { generateXMLNamespace(xmlnsNode); } @Override public void visit(BLangXMLAccessExpr xmlAccessExpr) { generateMappingAccess(xmlAccessExpr, false); } @Override public void visit(BLangTypedescExpr accessExpr) { BIRVariableDcl tempVarDcl = new BIRVariableDcl(accessExpr.getBType(), this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand toVarRef = new BIROperand(tempVarDcl); setScopeAndEmit(new BIRNonTerminator.NewTypeDesc(accessExpr.pos, toVarRef, accessExpr.resolvedType, Collections.emptyList())); this.env.targetOperand = toVarRef; } @Override public void visit(BLangTableConstructorExpr tableConstructorExpr) { BIRVariableDcl tempVarDcl = new BIRVariableDcl(tableConstructorExpr.getBType(), this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand toVarRef = new BIROperand(tempVarDcl); BLangArrayLiteral keySpecifierLiteral = new BLangArrayLiteral(); keySpecifierLiteral.pos = tableConstructorExpr.pos; keySpecifierLiteral.setBType(symTable.stringArrayType); keySpecifierLiteral.exprs = new ArrayList<>(); BTableType type = (BTableType) tableConstructorExpr.getBType(); if (type.fieldNameList != null) { type.fieldNameList.forEach(col -> { BLangLiteral colLiteral = new BLangLiteral(); colLiteral.pos = tableConstructorExpr.pos; colLiteral.setBType(symTable.stringType); colLiteral.value = col; keySpecifierLiteral.exprs.add(colLiteral); }); } keySpecifierLiteral.accept(this); BIROperand keyColOp = this.env.targetOperand; BLangArrayLiteral dataLiteral = new BLangArrayLiteral(); dataLiteral.pos = tableConstructorExpr.pos; dataLiteral.setBType(new BArrayType(((BTableType) tableConstructorExpr.getBType()).constraint)); dataLiteral.exprs = new ArrayList<>(tableConstructorExpr.recordLiteralList); dataLiteral.accept(this); BIROperand dataOp = this.env.targetOperand; setScopeAndEmit( new BIRNonTerminator.NewTable(tableConstructorExpr.pos, tableConstructorExpr.getBType(), toVarRef, keyColOp, dataOp)); this.env.targetOperand = toVarRef; } @Override public void visit(BLangSimpleVarRef.BLangTypeLoad 
typeLoad) { visitTypedesc(typeLoad.pos, typeLoad.symbol.type, Collections.emptyList()); } private void visitTypedesc(Location pos, BType type, List<BIROperand> varDcls) { BIRVariableDcl tempVarDcl = new BIRVariableDcl(symTable.typeDesc, this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand toVarRef = new BIROperand(tempVarDcl); setScopeAndEmit(new BIRNonTerminator.NewTypeDesc(pos, toVarRef, type, varDcls)); this.env.targetOperand = toVarRef; } @Override public void visit(BLangBreak breakStmt) { BIRLockDetailsHolder toUnlock = this.env.unlockVars.peek(); if (!toUnlock.isEmpty()) { BIRBasicBlock goToBB = new BIRBasicBlock(this.env.nextBBId(names)); this.env.enclBasicBlocks.add(goToBB); this.env.enclBB.terminator = new BIRTerminator.GOTO(breakStmt.pos, goToBB, this.currentScope); this.env.enclBB = goToBB; } int numLocks = toUnlock.size(); while (numLocks > 0) { BIRBasicBlock unlockBB = new BIRBasicBlock(this.env.nextBBId(names)); this.env.enclBasicBlocks.add(unlockBB); BIRTerminator.Unlock unlock = new BIRTerminator.Unlock(null, unlockBB, this.currentScope); this.env.enclBB.terminator = unlock; unlock.relatedLock = toUnlock.getLock(numLocks - 1); this.env.enclBB = unlockBB; numLocks--; } this.env.enclBB.terminator = new BIRTerminator.GOTO(breakStmt.pos, this.env.enclLoopEndBB, this.currentScope); } @Override public void visit(BLangContinue continueStmt) { BIRLockDetailsHolder toUnlock = this.env.unlockVars.peek(); if (!toUnlock.isEmpty()) { BIRBasicBlock goToBB = new BIRBasicBlock(this.env.nextBBId(names)); this.env.enclBasicBlocks.add(goToBB); this.env.enclBB.terminator = new BIRTerminator.GOTO(continueStmt.pos, goToBB, this.currentScope); this.env.enclBB = goToBB; } int numLocks = toUnlock.size(); while (numLocks > 0) { BIRBasicBlock unlockBB = new BIRBasicBlock(this.env.nextBBId(names)); this.env.enclBasicBlocks.add(unlockBB); BIRTerminator.Unlock unlock = new BIRTerminator.Unlock(null, unlockBB, this.currentScope); this.env.enclBB.terminator = unlock; BIRTerminator.Lock lock = toUnlock.getLock(numLocks - 1); unlock.relatedLock = lock; this.env.enclBB = unlockBB; numLocks--; } this.env.enclBB.terminator = new BIRTerminator.GOTO(continueStmt.pos, this.env.enclLoopBB, this.currentScope); } @Override public void visit(BLangFunctionVarRef fpVarRef) { generateFPVarRef(fpVarRef, (BInvokableSymbol) fpVarRef.symbol); } @Override public void visit(BLangStructFunctionVarRef structFpVarRef) { generateFPVarRef(structFpVarRef, (BInvokableSymbol) structFpVarRef.symbol); } @Override public void visit(BLangLockStmt lockStmt) { BIRBasicBlock lockedBB = new BIRBasicBlock(this.env.nextBBId(names)); addToTrapStack(lockedBB); this.env.enclBasicBlocks.add(lockedBB); BIRTerminator.Lock lock = new BIRTerminator.Lock(lockStmt.pos, lockedBB, this.currentScope); this.env.enclBB.terminator = lock; lockStmtMap.put(lockStmt, lock); this.env.unlockVars.peek().addLock(lock); populateBirLockWithGlobalVars(lockStmt); this.env.enclBB = lockedBB; } private void populateBirLockWithGlobalVars(BLangLockStmt lockStmt) { for (BVarSymbol globalVar : lockStmt.lockVariables) { BIRGlobalVariableDcl birGlobalVar = this.globalVarMap.get(globalVar); if (birGlobalVar == null) { birGlobalVar = dummyGlobalVarMapForLocks.computeIfAbsent(globalVar, k -> new BIRGlobalVariableDcl(null, globalVar.flags, globalVar.type, globalVar.pkgID, globalVar.name, globalVar.getOriginalName(), VarScope.GLOBAL, VarKind.GLOBAL, globalVar.name.value, globalVar.origin.toBIROrigin())); } 
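// Attach the resolved (or placeholder) global variable to the enclosing Lock terminator.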
((BIRTerminator.Lock) this.env.enclBB.terminator).lockVariables.add(birGlobalVar); } } @Override public void visit(BLangUnLockStmt unLockStmt) { BIRLockDetailsHolder lockDetailsHolder = this.env.unlockVars.peek(); if (lockDetailsHolder.isEmpty()) { return; } BIRBasicBlock unLockedBB = new BIRBasicBlock(this.env.nextBBId(names)); addToTrapStack(unLockedBB); this.env.enclBasicBlocks.add(unLockedBB); this.env.enclBB.terminator = new BIRTerminator.Unlock(unLockStmt.pos, unLockedBB, this.currentScope); ((BIRTerminator.Unlock) this.env.enclBB.terminator).relatedLock = lockStmtMap.get(unLockStmt.relatedLock); this.env.enclBB = unLockedBB; lockDetailsHolder.removeLastLock(); } private void setScopeAndEmit(BIRNonTerminator instruction) { instruction.scope = this.currentScope; this.env.enclBB.instructions.add(instruction); } private InstructionKind getBinaryInstructionKind(OperatorKind opKind) { switch (opKind) { case ADD: return InstructionKind.ADD; case SUB: return InstructionKind.SUB; case MUL: return InstructionKind.MUL; case DIV: return InstructionKind.DIV; case MOD: return InstructionKind.MOD; case EQUAL: case EQUALS: return InstructionKind.EQUAL; case NOT_EQUAL: return InstructionKind.NOT_EQUAL; case GREATER_THAN: return InstructionKind.GREATER_THAN; case GREATER_EQUAL: return InstructionKind.GREATER_EQUAL; case LESS_THAN: return InstructionKind.LESS_THAN; case LESS_EQUAL: return InstructionKind.LESS_EQUAL; case AND: return InstructionKind.AND; case OR: return InstructionKind.OR; case REF_EQUAL: return InstructionKind.REF_EQUAL; case REF_NOT_EQUAL: return InstructionKind.REF_NOT_EQUAL; case CLOSED_RANGE: return InstructionKind.CLOSED_RANGE; case HALF_OPEN_RANGE: return InstructionKind.HALF_OPEN_RANGE; case ANNOT_ACCESS: return InstructionKind.ANNOT_ACCESS; case BITWISE_AND: return InstructionKind.BITWISE_AND; case BITWISE_OR: return InstructionKind.BITWISE_OR; case BITWISE_XOR: return InstructionKind.BITWISE_XOR; case BITWISE_LEFT_SHIFT: return InstructionKind.BITWISE_LEFT_SHIFT; case BITWISE_RIGHT_SHIFT: return InstructionKind.BITWISE_RIGHT_SHIFT; case BITWISE_UNSIGNED_RIGHT_SHIFT: return InstructionKind.BITWISE_UNSIGNED_RIGHT_SHIFT; default: throw new IllegalStateException("unsupported binary operation: " + opKind.value()); } } private InstructionKind getUnaryInstructionKind(OperatorKind opKind) { switch (opKind) { case TYPEOF: return InstructionKind.TYPEOF; case NOT: return InstructionKind.NOT; case SUB: return InstructionKind.NEGATE; case ADD: return InstructionKind.MOVE; default: throw new IllegalStateException("unsupported unary operator: " + opKind.value()); } } private void generateListConstructorExpr(BLangListConstructorExpr listConstructorExpr) { BIRVariableDcl tempVarDcl = new BIRVariableDcl(listConstructorExpr.getBType(), this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand toVarRef = new BIROperand(tempVarDcl); long size = -1L; List<BLangExpression> exprs = listConstructorExpr.exprs; BType listConstructorExprType = types.getReferredType(listConstructorExpr.getBType()); if (listConstructorExprType.tag == TypeTags.ARRAY && ((BArrayType) listConstructorExprType).state != BArrayState.OPEN) { size = ((BArrayType) listConstructorExprType).size; } else if (listConstructorExprType.tag == TypeTags.TUPLE) { size = exprs.size(); } BLangLiteral literal = new BLangLiteral(); literal.pos = listConstructorExpr.pos; literal.value = size; literal.setBType(symTable.intType); literal.accept(this); BIROperand sizeOp = 
this.env.targetOperand; List<BIROperand> valueOperands = new ArrayList<>(exprs.size()); for (BLangExpression expr : exprs) { expr.accept(this); valueOperands.add(this.env.targetOperand); } setScopeAndEmit( new BIRNonTerminator.NewArray(listConstructorExpr.pos, listConstructorExprType, toVarRef, sizeOp, valueOperands)); this.env.targetOperand = toVarRef; } private void generateArrayAccess(BLangIndexBasedAccess astArrayAccessExpr) { boolean variableStore = this.varAssignment; this.varAssignment = false; BIROperand rhsOp = this.env.targetOperand; astArrayAccessExpr.expr.accept(this); BIROperand varRefRegIndex = this.env.targetOperand; astArrayAccessExpr.indexExpr.accept(this); BIROperand keyRegIndex = this.env.targetOperand; if (variableStore) { setScopeAndEmit(new BIRNonTerminator.FieldAccess(astArrayAccessExpr.pos, InstructionKind.ARRAY_STORE, varRefRegIndex, keyRegIndex, rhsOp)); return; } BIRVariableDcl tempVarDcl = new BIRVariableDcl(astArrayAccessExpr.getBType(), this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand tempVarRef = new BIROperand(tempVarDcl); setScopeAndEmit(new BIRNonTerminator.FieldAccess(astArrayAccessExpr.pos, InstructionKind.ARRAY_LOAD, tempVarRef, keyRegIndex, varRefRegIndex, false, astArrayAccessExpr.isLValue && !astArrayAccessExpr.leafNode)); this.env.targetOperand = tempVarRef; this.varAssignment = variableStore; } private void generateMappingAccess(BLangIndexBasedAccess astIndexBasedAccessExpr, boolean except) { boolean variableStore = this.varAssignment; this.varAssignment = false; InstructionKind insKind; BType astAccessExprExprType = types.getReferredType(astIndexBasedAccessExpr.expr.getBType()); if (variableStore) { BIROperand rhsOp = this.env.targetOperand; astIndexBasedAccessExpr.expr.accept(this); BIROperand varRefRegIndex = this.env.targetOperand; astIndexBasedAccessExpr.indexExpr.accept(this); BIROperand keyRegIndex = this.env.targetOperand; if (astIndexBasedAccessExpr.getKind() == NodeKind.XML_ATTRIBUTE_ACCESS_EXPR) { insKind = InstructionKind.XML_ATTRIBUTE_STORE; keyRegIndex = getQNameOP(astIndexBasedAccessExpr.indexExpr, keyRegIndex); } else if (astAccessExprExprType.tag == TypeTags.OBJECT || (astAccessExprExprType.tag == TypeTags.UNION && ((BUnionType) astAccessExprExprType).getMemberTypes().iterator() .next().tag == TypeTags.OBJECT)) { insKind = InstructionKind.OBJECT_STORE; } else { insKind = InstructionKind.MAP_STORE; } setScopeAndEmit( new BIRNonTerminator.FieldAccess(astIndexBasedAccessExpr.pos, insKind, varRefRegIndex, keyRegIndex, rhsOp, astIndexBasedAccessExpr.isStoreOnCreation)); } else { BIRVariableDcl tempVarDcl = new BIRVariableDcl(astIndexBasedAccessExpr.getBType(), this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarDcl); BIROperand tempVarRef = new BIROperand(tempVarDcl); astIndexBasedAccessExpr.expr.accept(this); BIROperand varRefRegIndex = this.env.targetOperand; astIndexBasedAccessExpr.indexExpr.accept(this); BIROperand keyRegIndex = this.env.targetOperand; if (astIndexBasedAccessExpr.getKind() == NodeKind.XML_ATTRIBUTE_ACCESS_EXPR) { insKind = InstructionKind.XML_ATTRIBUTE_LOAD; keyRegIndex = getQNameOP(astIndexBasedAccessExpr.indexExpr, keyRegIndex); } else if (TypeTags.isXMLTypeTag(astAccessExprExprType.tag)) { generateXMLAccess((BLangXMLAccessExpr) astIndexBasedAccessExpr, tempVarRef, varRefRegIndex, keyRegIndex); this.varAssignment = variableStore; return; } else if (astAccessExprExprType.tag == 
TypeTags.OBJECT || (astAccessExprExprType.tag == TypeTags.UNION && ((BUnionType) astAccessExprExprType).getMemberTypes().iterator() .next().tag == TypeTags.OBJECT)) { insKind = InstructionKind.OBJECT_LOAD; } else { insKind = InstructionKind.MAP_LOAD; } setScopeAndEmit( new BIRNonTerminator.FieldAccess(astIndexBasedAccessExpr.pos, insKind, tempVarRef, keyRegIndex, varRefRegIndex, except, astIndexBasedAccessExpr.isLValue && !astIndexBasedAccessExpr.leafNode)); this.env.targetOperand = tempVarRef; } this.varAssignment = variableStore; } private BTypeSymbol getObjectTypeSymbol(BType objType) { BType type = types.getReferredType(objType); if (type.tag == TypeTags.UNION) { return ((BUnionType) type).getMemberTypes().stream() .filter(t -> t.tag == TypeTags.OBJECT) .findFirst() .orElse(symTable.noType).tsymbol; } return type.tsymbol; } private BIROperand generateStringLiteral(String value) { BLangLiteral prefixLiteral = (BLangLiteral) TreeBuilder.createLiteralExpression(); prefixLiteral.value = value; if (value == null) { prefixLiteral.setBType(symTable.nilType); } else { prefixLiteral.setBType(symTable.stringType); } prefixLiteral.accept(this); return this.env.targetOperand; } private void generateXMLNamespace(BLangXMLNS xmlnsNode) { BIRVariableDcl birVarDcl = new BIRVariableDcl(xmlnsNode.pos, symTable.stringType, this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.LOCAL, null); this.env.enclFunc.localVars.add(birVarDcl); this.env.symbolVarMap.put(xmlnsNode.symbol, birVarDcl); xmlnsNode.namespaceURI.accept(this); BIROperand varRef = new BIROperand(birVarDcl); setScopeAndEmit(new Move(xmlnsNode.pos, this.env.targetOperand, varRef)); } private BIROperand generateNamespaceRef(BXMLNSSymbol nsSymbol, Location pos) { if (nsSymbol == null) { return generateStringLiteral(null); } int ownerTag = nsSymbol.owner.tag; if ((ownerTag & SymTag.PACKAGE) == SymTag.PACKAGE || (ownerTag & SymTag.OBJECT) == SymTag.OBJECT || (ownerTag & SymTag.RECORD) == SymTag.RECORD) { return generateStringLiteral(nsSymbol.namespaceURI); } BIRVariableDcl nsURIVarDcl = new BIRVariableDcl(symTable.stringType, this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(nsURIVarDcl); BIROperand nsURIVarRef = new BIROperand(nsURIVarDcl); BIRVariableDcl varDecl = this.env.symbolVarMap.get(nsSymbol); BIROperand fromVarRef = new BIROperand(varDecl); setScopeAndEmit(new Move(pos, fromVarRef, nsURIVarRef)); return nsURIVarRef; } private void populateXMLSequence(BLangXMLSequenceLiteral xmlSequenceLiteral, BIROperand toVarRef) { for (BLangExpression xmlItem : xmlSequenceLiteral.xmlItems) { xmlItem.accept(this); BIROperand childOp = this.env.targetOperand; setScopeAndEmit( new BIRNonTerminator.XMLAccess(xmlItem.pos, InstructionKind.XML_SEQ_STORE, toVarRef, childOp)); } } private void populateXML(BLangXMLElementLiteral xmlElementLiteral, BIROperand toVarRef) { xmlElementLiteral.namespacesInScope.forEach((name, symbol) -> { BLangXMLQName nsQName = new BLangXMLQName(name.getValue(), XMLConstants.XMLNS_ATTRIBUTE); nsQName.setBType(symTable.stringType); nsQName.accept(this); BIROperand nsQNameIndex = this.env.targetOperand; BIROperand nsURIIndex = generateNamespaceRef(symbol, xmlElementLiteral.pos); setScopeAndEmit(new BIRNonTerminator.FieldAccess(xmlElementLiteral.pos, InstructionKind.XML_ATTRIBUTE_STORE, toVarRef, nsQNameIndex, nsURIIndex)); }); xmlElementLiteral.attributes.forEach(attribute -> { this.env.targetOperand = toVarRef; attribute.accept(this); }); 
xmlElementLiteral.modifiedChildren.forEach(child -> { child.accept(this); BIROperand childOp = this.env.targetOperand; setScopeAndEmit( new BIRNonTerminator.XMLAccess(child.pos, InstructionKind.XML_SEQ_STORE, toVarRef, childOp)); }); } private BIROperand getQNameOP(BLangExpression qnameExpr, BIROperand keyRegIndex) { if (qnameExpr.getKind() == NodeKind.XML_QNAME) { return keyRegIndex; } BIRVariableDcl tempQNameVarDcl = new BIRVariableDcl(symTable.anyType, this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempQNameVarDcl); BIROperand qnameVarRef = new BIROperand(tempQNameVarDcl); setScopeAndEmit(new BIRNonTerminator.NewStringXMLQName(qnameExpr.pos, qnameVarRef, keyRegIndex)); return qnameVarRef; } private void generateXMLAccess(BLangXMLAccessExpr xmlAccessExpr, BIROperand tempVarRef, BIROperand varRefRegIndex, BIROperand keyRegIndex) { this.env.targetOperand = tempVarRef; InstructionKind insKind; if (xmlAccessExpr.fieldType == FieldKind.ALL) { setScopeAndEmit(new BIRNonTerminator.XMLAccess(xmlAccessExpr.pos, InstructionKind.XML_LOAD_ALL, tempVarRef, varRefRegIndex)); return; } else if (xmlAccessExpr.indexExpr.getBType().tag == TypeTags.STRING) { insKind = InstructionKind.XML_LOAD; } else { insKind = InstructionKind.XML_SEQ_LOAD; } setScopeAndEmit( new BIRNonTerminator.FieldAccess(xmlAccessExpr.pos, insKind, tempVarRef, keyRegIndex, varRefRegIndex)); } private void generateFPVarRef(BLangExpression fpVarRef, BInvokableSymbol funcSymbol) { BIRVariableDcl tempVarLambda = new BIRVariableDcl(fpVarRef.getBType(), this.env.nextLocalVarId(names), VarScope.FUNCTION, VarKind.TEMP); this.env.enclFunc.localVars.add(tempVarLambda); BIROperand lhsOp = new BIROperand(tempVarLambda); Name funcName = getFuncName(funcSymbol); List<BIRVariableDcl> params = new ArrayList<>(); funcSymbol.params.forEach(param -> { BIRVariableDcl birVarDcl = new BIRVariableDcl(fpVarRef.pos, param.type, this.env.nextLambdaVarId(names), VarScope.FUNCTION, VarKind.ARG, null); params.add(birVarDcl); }); BVarSymbol restParam = funcSymbol.restParam; if (restParam != null) { BIRVariableDcl birVarDcl = new BIRVariableDcl(fpVarRef.pos, restParam.type, this.env.nextLambdaVarId(names), VarScope.FUNCTION, VarKind.ARG, null); params.add(birVarDcl); } setScopeAndEmit( new BIRNonTerminator.FPLoad(fpVarRef.pos, funcSymbol.pkgID, funcName, lhsOp, params, new ArrayList<>(), funcSymbol.type, funcSymbol.strandName, funcSymbol.schedulerPolicy)); this.env.targetOperand = lhsOp; } private void populateBIRAnnotAttachments(List<BLangAnnotationAttachment> astAnnotAttachments, List<BIRAnnotationAttachment> birAnnotAttachments, BIRGenEnv currentEnv) { currentEnv.enclAnnotAttachments = birAnnotAttachments; astAnnotAttachments.forEach(annotAttach -> annotAttach.accept(this)); currentEnv.enclAnnotAttachments = null; } private void addToTrapStack(BIRBasicBlock birBasicBlock) { if (this.env.trapBlocks.isEmpty()) { return; } this.env.trapBlocks.peek().add(birBasicBlock); } private List<BIRAnnotationAttachment> getStatementAnnotations(List<BLangAnnotationAttachment> astAnnotAttachments, BIRGenEnv currentEnv) { List<BIRAnnotationAttachment> functionAnnotAttachments = currentEnv.enclAnnotAttachments; currentEnv.enclAnnotAttachments = new ArrayList<>(); astAnnotAttachments.forEach(annotAttach -> annotAttach.accept(this)); List<BIRAnnotationAttachment> statementAnnots = currentEnv.enclAnnotAttachments; currentEnv.enclAnnotAttachments = functionAnnotAttachments; return statementAnnots; } private 
List<BIRNode.BIRMappingConstructorEntry> generateMappingConstructorEntries( List<RecordLiteralNode.RecordField> fields) { List<BIRNode.BIRMappingConstructorEntry> initialValues = new ArrayList<>(fields.size()); for (RecordLiteralNode.RecordField field : fields) { if (field.isKeyValueField()) { BLangRecordKeyValueField keyValueField = (BLangRecordKeyValueField) field; keyValueField.key.expr.accept(this); BIROperand keyOperand = this.env.targetOperand; keyValueField.valueExpr.accept(this); BIROperand valueOperand = this.env.targetOperand; initialValues.add(new BIRNode.BIRMappingConstructorKeyValueEntry(keyOperand, valueOperand)); continue; } BLangRecordLiteral.BLangRecordSpreadOperatorField spreadField = (BLangRecordLiteral.BLangRecordSpreadOperatorField) field; spreadField.expr.accept(this); initialValues.add(new BIRNode.BIRMappingConstructorSpreadFieldEntry(this.env.targetOperand)); } return initialValues; } }
I think I misunderstood; I've removed it.
public static boolean usesMultimapState(DoFn<?, ?> doFn) { return usesGivenStateClass(doFn, MultimapState.class) || requiresTimeSortedInput(doFn); }
return usesGivenStateClass(doFn, MultimapState.class) || requiresTimeSortedInput(doFn);
public static boolean usesMultimapState(DoFn<?, ?> doFn) { return usesGivenStateClass(doFn, MultimapState.class); }
class %s: timer declaration field %s is not accessible.", format(DoFn.class), target.getClass().getName(), timerFamilyDeclaration.field().getName())); } } public static boolean isSplittable(DoFn<?, ?> doFn) { return signatureForDoFn(doFn).processElement().isSplittable(); }
class %s: timer declaration field %s is not accessible.", format(DoFn.class), target.getClass().getName(), timerFamilyDeclaration.field().getName())); } } public static boolean isSplittable(DoFn<?, ?> doFn) { return signatureForDoFn(doFn).processElement().isSplittable(); }
This description is used only for logging when the connection is lost (see [ZooKeeperMultipleComponentLeaderElectionDriver:175](https://github.com/apache/flink/blob/23d942cb6bba947ca3844687a65e8d0451c62041/flink-runtime/src/main/java/org/apache/flink/runtime/leaderelection/ZooKeeperMultipleComponentLeaderElectionDriver.java#L175)). I couldn't find any code location where the `ZooKeeperMultipleComponentLeaderElectionDriver.toString` method is utilized. Either we should add some random string here to differentiate different Dispatcher instances, or we could remove it entirely (the k8s implementation doesn't make use of it at all but swallows it in `KubernetesMultipleComponentLeaderElectionDriverFactory.create`).
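A minimal sketch of the first option, hedged: the class name `ContenderDescriptions` and the exact string format are illustrative, not Flink API; only the idea of a per-instance random suffix comes from the comment above.

```java
import java.util.UUID;

final class ContenderDescriptions {
    // Unique description per leader-election service instance, so that
    // "connection lost" log output from different Dispatchers can be
    // told apart. The constant prefix matches the snippet below.
    static String singleLeaderElection() {
        return "Single leader election service (" + UUID.randomUUID() + ")";
    }
}
```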
private MultipleComponentLeaderElectionService getOrInitializeSingleLeaderElectionService() { if (multipleComponentLeaderElectionService == null) { try { multipleComponentLeaderElectionService = new DefaultMultipleComponentLeaderElectionService( fatalErrorHandler, "Single leader election service.", new ZooKeeperMultipleComponentLeaderElectionDriverFactory( leaderNamespacedCuratorFramework)); } catch (Exception e) { throw new FlinkRuntimeException( String.format( "Could not initialize the %s", DefaultMultipleComponentLeaderElectionService.class .getSimpleName()), e); } } return multipleComponentLeaderElectionService; }
"Single leader election service.",
private MultipleComponentLeaderElectionService getOrInitializeSingleLeaderElectionService() { synchronized (lock) { if (multipleComponentLeaderElectionService == null) { try { multipleComponentLeaderElectionService = new DefaultMultipleComponentLeaderElectionService( fatalErrorHandler, new ZooKeeperMultipleComponentLeaderElectionDriverFactory( leaderNamespacedCuratorFramework)); } catch (Exception e) { throw new FlinkRuntimeException( String.format( "Could not initialize the %s", DefaultMultipleComponentLeaderElectionService.class .getSimpleName()), e); } } return multipleComponentLeaderElectionService; } }
class ZooKeeperMultipleComponentLeaderElectionHaServices extends AbstractZooKeeperHaServices { private final Object lock = new Object(); private final CuratorFramework leaderNamespacedCuratorFramework; private final FatalErrorHandler fatalErrorHandler; @Nullable @GuardedBy("lock") private MultipleComponentLeaderElectionService multipleComponentLeaderElectionService = null; public ZooKeeperMultipleComponentLeaderElectionHaServices( CuratorFrameworkWithUnhandledErrorListener curatorFrameworkWrapper, Configuration config, Executor ioExecutor, BlobStoreService blobStoreService, FatalErrorHandler fatalErrorHandler) throws Exception { super(curatorFrameworkWrapper, ioExecutor, config, blobStoreService); this.leaderNamespacedCuratorFramework = ZooKeeperUtils.useNamespaceAndEnsurePath( getCuratorFramework(), ZooKeeperUtils.getLeaderPath()); this.fatalErrorHandler = fatalErrorHandler; } @Override protected LeaderElectionService createLeaderElectionService(String leaderName) { final MultipleComponentLeaderElectionService multipleComponentLeaderElectionService; synchronized (lock) { multipleComponentLeaderElectionService = getOrInitializeSingleLeaderElectionService(); } return new DefaultLeaderElectionService( multipleComponentLeaderElectionService.createDriverFactory(leaderName)); } @GuardedBy("lock") @Override protected LeaderRetrievalService createLeaderRetrievalService(String leaderPath) { return ZooKeeperUtils.createLeaderRetrievalService( leaderNamespacedCuratorFramework, leaderPath, configuration); } @Override protected void internalClose() throws Exception { Exception exception = null; synchronized (lock) { if (multipleComponentLeaderElectionService != null) { try { multipleComponentLeaderElectionService.close(); } catch (Exception e) { exception = e; } multipleComponentLeaderElectionService = null; } } try { super.internalClose(); } catch (Exception e) { exception = ExceptionUtils.firstOrSuppressed(e, exception); } ExceptionUtils.tryRethrowException(exception); } @Override protected void internalCleanupJobData(JobID jobID) throws Exception { super.internalCleanupJobData(jobID); } @Override protected String getLeaderPathForResourceManager() { return ZooKeeperUtils.getResourceManagerNode(); } @Override protected String getLeaderPathForDispatcher() { return ZooKeeperUtils.getDispatcherNode(); } @Override protected String getLeaderPathForJobManager(JobID jobID) { return jobID.toString(); } @Override protected String getLeaderPathForRestServer() { return ZooKeeperUtils.getRestServerNode(); } }
class ZooKeeperMultipleComponentLeaderElectionHaServices extends AbstractZooKeeperHaServices { private final Object lock = new Object(); private final CuratorFramework leaderNamespacedCuratorFramework; private final FatalErrorHandler fatalErrorHandler; @Nullable @GuardedBy("lock") private MultipleComponentLeaderElectionService multipleComponentLeaderElectionService = null; public ZooKeeperMultipleComponentLeaderElectionHaServices( CuratorFrameworkWithUnhandledErrorListener curatorFrameworkWrapper, Configuration config, Executor ioExecutor, BlobStoreService blobStoreService, FatalErrorHandler fatalErrorHandler) throws Exception { super(curatorFrameworkWrapper, ioExecutor, config, blobStoreService); this.leaderNamespacedCuratorFramework = ZooKeeperUtils.useNamespaceAndEnsurePath( getCuratorFramework(), ZooKeeperUtils.getLeaderPath()); this.fatalErrorHandler = fatalErrorHandler; } @Override protected LeaderElectionService createLeaderElectionService(String leaderName) { return new DefaultLeaderElectionService( getOrInitializeSingleLeaderElectionService().createDriverFactory(leaderName)); } @Override protected LeaderRetrievalService createLeaderRetrievalService(String leaderPath) { return ZooKeeperUtils.createLeaderRetrievalService( leaderNamespacedCuratorFramework, leaderPath, configuration); } @Override protected void internalClose() throws Exception { Exception exception = null; synchronized (lock) { if (multipleComponentLeaderElectionService != null) { try { multipleComponentLeaderElectionService.close(); } catch (Exception e) { exception = e; } multipleComponentLeaderElectionService = null; } } try { super.internalClose(); } catch (Exception e) { exception = ExceptionUtils.firstOrSuppressed(e, exception); } ExceptionUtils.tryRethrowException(exception); } @Override protected String getLeaderPathForResourceManager() { return ZooKeeperUtils.getResourceManagerNode(); } @Override protected String getLeaderPathForDispatcher() { return ZooKeeperUtils.getDispatcherNode(); } @Override protected String getLeaderPathForJobManager(JobID jobID) { return jobID.toString(); } @Override protected String getLeaderPathForRestServer() { return ZooKeeperUtils.getRestServerNode(); } }
It would be better to have this logic in one place and not distributed across two different classes (the head is marked as ended in the processor, while non-heads are marked via the task). Also, what you proposed adds an (additional) cyclic dependency between Task and Processor, which is not good either. Maybe the best approach would be to encapsulate this whole logic in the `OperatorChain` class and pass it to the processors? For example by adding an `OperatorChain#endInput(int inputId)` method, which would:
- if the head is `TwoInput`, track which inputs have ended; once all of them have, propagate the `endInput` call down the operator chain.
- if the head is `OneInput`, just end all of the inputs (see the sketch after this comment).

For the future, we cannot be sure how `TwoInputStreamOperator` chaining will be implemented, but if we keep the `OperatorChain` class, the support for propagating `endInput` would only need to be adjusted there.
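A hedged sketch of the proposed method: `BoundedOneInput`, `TwoInputStreamOperator`, and `StreamOperator` are existing Flink interfaces, but the class, its fields, and the operator-iteration detail are assumptions standing in for `OperatorChain`'s real state, not Flink's actual code.

```java
import java.util.HashSet;
import java.util.Set;

import org.apache.flink.streaming.api.operators.BoundedOneInput;
import org.apache.flink.streaming.api.operators.StreamOperator;
import org.apache.flink.streaming.api.operators.TwoInputStreamOperator;

// Hypothetical sketch of OperatorChain#endInput(int inputId).
final class EndInputSketch {
    private final StreamOperator<?> headOperator;
    private final StreamOperator<?>[] allOperators;
    private final Set<Integer> endedInputs = new HashSet<>();

    EndInputSketch(StreamOperator<?> headOperator, StreamOperator<?>[] allOperators) {
        this.headOperator = headOperator;
        this.allOperators = allOperators;
    }

    void endInput(int inputId) throws Exception {
        if (headOperator instanceof TwoInputStreamOperator) {
            endedInputs.add(inputId);   // track which head input has ended
            if (endedInputs.size() < 2) {
                return;                 // the other input is still running
            }
        }
        // OneInput head, or both inputs of a TwoInput head have ended:
        // propagate endInput to every bounded operator in the chain.
        for (StreamOperator<?> operator : allOperators) {
            if (operator instanceof BoundedOneInput) {
                ((BoundedOneInput) operator).endInput();
            }
        }
    }
}
```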
private boolean checkFinished() throws Exception { boolean isFinished = input.isFinished(); if (isFinished) { if (streamOperator instanceof BoundedOneInput) { synchronized (lock) { ((BoundedOneInput) streamOperator).endInput(); } } } return isFinished; }
if (streamOperator instanceof BoundedOneInput) {
private boolean checkFinished() throws Exception { boolean isFinished = input.isFinished(); if (isFinished) { synchronized (lock) { operatorChain.endInput(1); } } return isFinished; }
class StreamInputProcessor<IN> { private static final Logger LOG = LoggerFactory.getLogger(StreamInputProcessor.class); private final StreamTaskInput input; private final Object lock; /** Valve that controls how watermarks and stream statuses are forwarded. */ private StatusWatermarkValve statusWatermarkValve; private final StreamStatusMaintainer streamStatusMaintainer; private final OneInputStreamOperator<IN, ?> streamOperator; private final WatermarkGauge watermarkGauge; private Counter numRecordsIn; @SuppressWarnings("unchecked") public StreamInputProcessor( InputGate[] inputGates, TypeSerializer<IN> inputSerializer, StreamTask<?, ?> checkpointedTask, CheckpointingMode checkpointMode, Object lock, IOManager ioManager, Configuration taskManagerConfig, StreamStatusMaintainer streamStatusMaintainer, OneInputStreamOperator<IN, ?> streamOperator, TaskIOMetricGroup metrics, WatermarkGauge watermarkGauge, String taskName) throws IOException { InputGate inputGate = InputGateUtil.createInputGate(inputGates); CheckpointBarrierHandler barrierHandler = InputProcessorUtil.createCheckpointBarrierHandler( checkpointedTask, checkpointMode, ioManager, inputGate, taskManagerConfig, taskName); this.input = new StreamTaskNetworkInput(barrierHandler, inputSerializer, ioManager, 0); this.lock = checkNotNull(lock); this.streamStatusMaintainer = checkNotNull(streamStatusMaintainer); this.streamOperator = checkNotNull(streamOperator); this.statusWatermarkValve = new StatusWatermarkValve( inputGate.getNumberOfInputChannels(), new ForwardingValveOutputHandler(streamOperator, lock)); this.watermarkGauge = watermarkGauge; metrics.gauge("checkpointAlignmentTime", barrierHandler::getAlignmentDurationNanos); } public boolean processInput() throws Exception { initializeNumRecordsIn(); if (input.isFinished()) { return false; } StreamElement recordOrMark = input.pollNextNullable(); if (recordOrMark == null) { input.isAvailable().get(); return !checkFinished(); } int channel = input.getLastChannel(); checkState(channel != StreamTaskInput.UNSPECIFIED); processElement(recordOrMark, channel); return true; } private void processElement(StreamElement recordOrMark, int channel) throws Exception { if (recordOrMark.isRecord()) { StreamRecord<IN> record = recordOrMark.asRecord(); synchronized (lock) { numRecordsIn.inc(); streamOperator.setKeyContextElement1(record); streamOperator.processElement(record); } } else if (recordOrMark.isWatermark()) { statusWatermarkValve.inputWatermark(recordOrMark.asWatermark(), channel); } else if (recordOrMark.isStreamStatus()) { statusWatermarkValve.inputStreamStatus(recordOrMark.asStreamStatus(), channel); } else if (recordOrMark.isLatencyMarker()) { synchronized (lock) { streamOperator.processLatencyMarker(recordOrMark.asLatencyMarker()); } } else { throw new UnsupportedOperationException("Unknown type of StreamElement"); } } private void initializeNumRecordsIn() { if (numRecordsIn == null) { try { numRecordsIn = ((OperatorMetricGroup) streamOperator.getMetricGroup()).getIOMetricGroup().getNumRecordsInCounter(); } catch (Exception e) { LOG.warn("An exception occurred during the metrics setup.", e); numRecordsIn = new SimpleCounter(); } } } public void cleanup() throws Exception { input.close(); } private class ForwardingValveOutputHandler implements StatusWatermarkValve.ValveOutputHandler { private final OneInputStreamOperator<IN, ?> operator; private final Object lock; private ForwardingValveOutputHandler(final OneInputStreamOperator<IN, ?> operator, final Object lock) { this.operator = 
checkNotNull(operator); this.lock = checkNotNull(lock); } @Override public void handleWatermark(Watermark watermark) { try { synchronized (lock) { watermarkGauge.setCurrentWatermark(watermark.getTimestamp()); operator.processWatermark(watermark); } } catch (Exception e) { throw new RuntimeException("Exception occurred while processing valve output watermark: ", e); } } @SuppressWarnings("unchecked") @Override public void handleStreamStatus(StreamStatus streamStatus) { try { synchronized (lock) { streamStatusMaintainer.toggleStreamStatus(streamStatus); } } catch (Exception e) { throw new RuntimeException("Exception occurred while processing valve output stream status: ", e); } } } }
class StreamInputProcessor<IN> { private static final Logger LOG = LoggerFactory.getLogger(StreamInputProcessor.class); private final StreamTaskInput input; private final Object lock; private final OperatorChain<?, ?> operatorChain; /** Valve that controls how watermarks and stream statuses are forwarded. */ private StatusWatermarkValve statusWatermarkValve; private final StreamStatusMaintainer streamStatusMaintainer; private final OneInputStreamOperator<IN, ?> streamOperator; private final WatermarkGauge watermarkGauge; private Counter numRecordsIn; @SuppressWarnings("unchecked") public StreamInputProcessor( InputGate[] inputGates, TypeSerializer<IN> inputSerializer, StreamTask<?, ?> checkpointedTask, CheckpointingMode checkpointMode, Object lock, IOManager ioManager, Configuration taskManagerConfig, StreamStatusMaintainer streamStatusMaintainer, OneInputStreamOperator<IN, ?> streamOperator, TaskIOMetricGroup metrics, WatermarkGauge watermarkGauge, String taskName, OperatorChain<?, ?> operatorChain) throws IOException { InputGate inputGate = InputGateUtil.createInputGate(inputGates); CheckpointBarrierHandler barrierHandler = InputProcessorUtil.createCheckpointBarrierHandler( checkpointedTask, checkpointMode, ioManager, inputGate, taskManagerConfig, taskName); this.input = new StreamTaskNetworkInput(barrierHandler, inputSerializer, ioManager, 0); this.lock = checkNotNull(lock); this.streamStatusMaintainer = checkNotNull(streamStatusMaintainer); this.streamOperator = checkNotNull(streamOperator); this.statusWatermarkValve = new StatusWatermarkValve( inputGate.getNumberOfInputChannels(), new ForwardingValveOutputHandler(streamOperator, lock)); this.watermarkGauge = watermarkGauge; metrics.gauge("checkpointAlignmentTime", barrierHandler::getAlignmentDurationNanos); this.operatorChain = checkNotNull(operatorChain); } public boolean processInput() throws Exception { initializeNumRecordsIn(); StreamElement recordOrMark = input.pollNextNullable(); if (recordOrMark == null) { input.isAvailable().get(); return !checkFinished(); } int channel = input.getLastChannel(); checkState(channel != StreamTaskInput.UNSPECIFIED); processElement(recordOrMark, channel); return true; } private void processElement(StreamElement recordOrMark, int channel) throws Exception { if (recordOrMark.isRecord()) { StreamRecord<IN> record = recordOrMark.asRecord(); synchronized (lock) { numRecordsIn.inc(); streamOperator.setKeyContextElement1(record); streamOperator.processElement(record); } } else if (recordOrMark.isWatermark()) { statusWatermarkValve.inputWatermark(recordOrMark.asWatermark(), channel); } else if (recordOrMark.isStreamStatus()) { statusWatermarkValve.inputStreamStatus(recordOrMark.asStreamStatus(), channel); } else if (recordOrMark.isLatencyMarker()) { synchronized (lock) { streamOperator.processLatencyMarker(recordOrMark.asLatencyMarker()); } } else { throw new UnsupportedOperationException("Unknown type of StreamElement"); } } private void initializeNumRecordsIn() { if (numRecordsIn == null) { try { numRecordsIn = ((OperatorMetricGroup) streamOperator.getMetricGroup()).getIOMetricGroup().getNumRecordsInCounter(); } catch (Exception e) { LOG.warn("An exception occurred during the metrics setup.", e); numRecordsIn = new SimpleCounter(); } } } public void cleanup() throws Exception { input.close(); } private class ForwardingValveOutputHandler implements StatusWatermarkValve.ValveOutputHandler { private final OneInputStreamOperator<IN, ?> operator; private final Object lock; private 
ForwardingValveOutputHandler(final OneInputStreamOperator<IN, ?> operator, final Object lock) { this.operator = checkNotNull(operator); this.lock = checkNotNull(lock); } @Override public void handleWatermark(Watermark watermark) { try { synchronized (lock) { watermarkGauge.setCurrentWatermark(watermark.getTimestamp()); operator.processWatermark(watermark); } } catch (Exception e) { throw new RuntimeException("Exception occurred while processing valve output watermark: ", e); } } @SuppressWarnings("unchecked") @Override public void handleStreamStatus(StreamStatus streamStatus) { try { synchronized (lock) { streamStatusMaintainer.toggleStreamStatus(streamStatus); } } catch (Exception e) { throw new RuntimeException("Exception occurred while processing valve output stream status: ", e); } } } }
OpenJDK has `com.sun.management` available and it is giving numbers. I will try with another JDK, maybe IBM's.
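A hedged portability sketch: `com.sun.management.OperatingSystemMXBean` is a vendor extension (present on HotSpot/OpenJDK), so an `instanceof` guard avoids a `ClassCastException` on JVMs whose platform bean does not implement it. The class name `CpuLoadProbe` is illustrative; the MXBean getters are real JDK API.

```java
import java.lang.management.ManagementFactory;
import java.lang.management.OperatingSystemMXBean;

final class CpuLoadProbe {
    // Returns {processCpuLoad, systemCpuLoad}; NaN when the vendor
    // extension is unavailable on the running JVM (e.g. possibly IBM J9).
    static double[] cpuLoads() {
        OperatingSystemMXBean bean = ManagementFactory.getOperatingSystemMXBean();
        if (bean instanceof com.sun.management.OperatingSystemMXBean) {
            com.sun.management.OperatingSystemMXBean sunBean =
                    (com.sun.management.OperatingSystemMXBean) bean;
            // Both getters return -1.0 while the load is not yet available.
            return new double[] {sunBean.getProcessCpuLoad(), sunBean.getSystemCpuLoad()};
        }
        return new double[] {Double.NaN, Double.NaN};
    }
}
```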
private void printSystemInformation(StringBuilder stringBuilder) { try { long totalMemory = Runtime.getRuntime().totalMemory() / 1024; long freeMemory = Runtime.getRuntime().freeMemory() / 1024; long maxMemory = Runtime.getRuntime().maxMemory() / 1024; String used_Memory = totalMemory - freeMemory + " KB"; String available_Memory = (maxMemory - (totalMemory - freeMemory)) + " KB"; OperatingSystemMXBean mbean = (com.sun.management.OperatingSystemMXBean) ManagementFactory.getOperatingSystemMXBean(); String processCpuLoad = Double.toString(mbean.getProcessCpuLoad()); String systemCpuLoad = Double.toString(mbean.getSystemCpuLoad()); stringBuilder.append("System State Information -------").append(System.lineSeparator()); stringBuilder.append("Used Memory : " + used_Memory).append(System.lineSeparator()); stringBuilder.append("Available Memory : " + available_Memory).append(System.lineSeparator()); stringBuilder.append("CPU Process Load : " + processCpuLoad).append(System.lineSeparator()); stringBuilder.append("CPU System Load : " + systemCpuLoad).append(System.lineSeparator()); } catch (Exception e) { } }
OperatingSystemMXBean mbean = (com.sun.management.OperatingSystemMXBean)
private void printSystemInformation(StringBuilder stringBuilder) { try { long totalMemory = Runtime.getRuntime().totalMemory() / 1024; long freeMemory = Runtime.getRuntime().freeMemory() / 1024; long maxMemory = Runtime.getRuntime().maxMemory() / 1024; String usedMemory = totalMemory - freeMemory + " KB"; String availableMemory = (maxMemory - (totalMemory - freeMemory)) + " KB"; String processCpuLoad = Double.toString(this.mbean.getProcessCpuLoad()); String systemCpuLoad = Double.toString(this.mbean.getSystemCpuLoad()); stringBuilder.append("System State Information -------").append(System.lineSeparator()); stringBuilder.append("Used Memory : " + usedMemory).append(System.lineSeparator()); stringBuilder.append("Available Memory : " + availableMemory).append(System.lineSeparator()); stringBuilder.append("CPU Process Load : " + processCpuLoad).append(System.lineSeparator()); stringBuilder.append("CPU System Load : " + systemCpuLoad).append(System.lineSeparator()); } catch (Exception e) { } }
class ClientSideRequestStatistics { private final static int MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING = 10; private final static DateTimeFormatter responseTimeFormatter = DateTimeFormatter.ofPattern("dd MMM yyyy HH:mm:ss.SSS").withLocale(Locale.US); private ZonedDateTime requestStartTime; private ZonedDateTime requestEndTime; private ConnectionMode connectionMode; private List<StoreResponseStatistics> responseStatisticsList; private List<StoreResponseStatistics> supplementalResponseStatisticsList; private Map<String, AddressResolutionStatistics> addressResolutionStatistics; private GatewayStatistic gatewayStatistic; private List<URI> contactedReplicas; private Set<URI> failedReplicas; private Set<URI> regionsContacted; ClientSideRequestStatistics() { this.requestStartTime = ZonedDateTime.now(ZoneOffset.UTC); this.requestEndTime = ZonedDateTime.now(ZoneOffset.UTC); this.responseStatisticsList = new ArrayList<>(); this.supplementalResponseStatisticsList = new ArrayList<>(); this.addressResolutionStatistics = new HashMap<>(); this.contactedReplicas = new ArrayList<>(); this.failedReplicas = new HashSet<>(); this.regionsContacted = new HashSet<>(); this.connectionMode = ConnectionMode.DIRECT; } Duration getRequestLatency() { return Duration.between(requestStartTime, requestEndTime); } private boolean isCPUOverloaded() { return false; } void recordResponse(RxDocumentServiceRequest request, StoreResult storeResult) { ZonedDateTime responseTime = ZonedDateTime.now(ZoneOffset.UTC); connectionMode = ConnectionMode.DIRECT; StoreResponseStatistics storeResponseStatistics = new StoreResponseStatistics(); storeResponseStatistics.requestResponseTime = responseTime; storeResponseStatistics.storeResult = storeResult; storeResponseStatistics.requestOperationType = request.getOperationType(); storeResponseStatistics.requestResourceType = request.getResourceType(); URI locationEndPoint = null; if (request.requestContext.locationEndpointToRoute != null) { try { locationEndPoint = request.requestContext.locationEndpointToRoute.toURI(); } catch (URISyntaxException e) { throw new IllegalArgumentException(e); } } synchronized (this) { if (responseTime.isAfter(this.requestEndTime)) { this.requestEndTime = responseTime; } if (locationEndPoint != null) { this.regionsContacted.add(locationEndPoint); } if (storeResponseStatistics.requestOperationType == OperationType.Head || storeResponseStatistics.requestOperationType == OperationType.HeadFeed) { this.supplementalResponseStatisticsList.add(storeResponseStatistics); } else { this.responseStatisticsList.add(storeResponseStatistics); } } } void recordGatewayResponse(RxDocumentServiceRequest rxDocumentServiceRequest, StoreResponse storeResponse, CosmosClientException exception) { ZonedDateTime responseTime = ZonedDateTime.now(ZoneOffset.UTC); connectionMode = ConnectionMode.GATEWAY; synchronized (this) { if (responseTime.isAfter(this.requestEndTime)) { this.requestEndTime = responseTime; } this.gatewayStatistic = new GatewayStatistic(); this.gatewayStatistic.operationType = rxDocumentServiceRequest.getOperationType(); if (storeResponse != null) { this.gatewayStatistic.statusCode = storeResponse.getStatus(); this.gatewayStatistic.subStatusCode = DirectBridgeInternal.getSubStatusCode(storeResponse); ISessionToken sessionToken = null; String headerValue; if ((headerValue = storeResponse.getHeaderValue(HttpConstants.HttpHeaders.SESSION_TOKEN)) != null) { sessionToken = SessionTokenHelper.parse(headerValue); } double requestCharge = 0; if ((headerValue = 
storeResponse.getHeaderValue(HttpConstants.HttpHeaders.REQUEST_CHARGE)) != null) { requestCharge = Double.parseDouble(headerValue); } this.gatewayStatistic.sessionToken = sessionToken; this.gatewayStatistic.requestCharge = requestCharge; } else if(exception != null){ this.gatewayStatistic.statusCode = exception.getStatusCode(); this.gatewayStatistic.subStatusCode = exception.getSubStatusCode(); } } } String recordAddressResolutionStart(URI targetEndpoint) { String identifier = Utils.randomUUID().toString(); AddressResolutionStatistics resolutionStatistics = new AddressResolutionStatistics(); resolutionStatistics.startTime = ZonedDateTime.now(ZoneOffset.UTC); resolutionStatistics.endTime = ZonedDateTime.of(LocalDateTime.MAX, ZoneOffset.UTC); resolutionStatistics.targetEndpoint = targetEndpoint == null ? "<NULL>" : targetEndpoint.toString(); synchronized (this) { this.addressResolutionStatistics.put(identifier, resolutionStatistics); } return identifier; } void recordAddressResolutionEnd(String identifier) { if (StringUtils.isEmpty(identifier)) { return; } ZonedDateTime responseTime = ZonedDateTime.now(ZoneOffset.UTC); synchronized (this) { if (!this.addressResolutionStatistics.containsKey(identifier)) { throw new IllegalArgumentException("Identifier " + identifier + " does not exist. Please call start before calling end"); } if (responseTime.isAfter(this.requestEndTime)) { this.requestEndTime = responseTime; } AddressResolutionStatistics resolutionStatistics = this.addressResolutionStatistics.get(identifier); resolutionStatistics.endTime = responseTime; } } @Override public String toString() { StringBuilder stringBuilder = new StringBuilder(); synchronized (this) { stringBuilder.append("RequestStartTime: ") .append("\"").append(this.requestStartTime.format(responseTimeFormatter)).append("\"") .append(", ") .append("RequestEndTime: ") .append("\"").append(this.requestEndTime.format(responseTimeFormatter)).append("\"") .append(", ") .append("Duration: ") .append(Duration.between(requestStartTime, requestEndTime).toMillis()) .append(" ms, ") .append("Connection Mode : " + this.connectionMode.toString() + ", ") .append("NUMBER of regions attempted: ") .append(this.regionsContacted.isEmpty() ? 1 : this.regionsContacted.size()) .append(System.lineSeparator()); for (StoreResponseStatistics storeResponseStatistics : this.responseStatisticsList) { stringBuilder.append(storeResponseStatistics.toString()).append(System.lineSeparator()); } for (AddressResolutionStatistics value : this.addressResolutionStatistics.values()) { stringBuilder.append(value.toString()).append(System.lineSeparator()); } int supplementalResponseStatisticsListCount = this.supplementalResponseStatisticsList.size(); int initialIndex = Math.max(supplementalResponseStatisticsListCount - MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING, 0); if (initialIndex != 0) { stringBuilder.append(" -- Displaying only the last ") .append(MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING) .append(" head/headfeed requests. 
Total head/headfeed requests: ") .append(supplementalResponseStatisticsListCount); } for (int i = initialIndex; i < supplementalResponseStatisticsListCount; i++) { stringBuilder.append(this.supplementalResponseStatisticsList.get(i).toString()).append(System.lineSeparator()); } if (this.gatewayStatistic != null) { stringBuilder.append(this.gatewayStatistic).append(System.lineSeparator()); } printSystemInformation(stringBuilder); } String requestStatsString = stringBuilder.toString(); if (!requestStatsString.isEmpty()) { return System.lineSeparator() + requestStatsString; } return StringUtils.EMPTY; } List<URI> getContactedReplicas() { return contactedReplicas; } void setContactedReplicas(List<URI> contactedReplicas) { this.contactedReplicas = contactedReplicas; } Set<URI> getFailedReplicas() { return failedReplicas; } void setFailedReplicas(Set<URI> failedReplicas) { this.failedReplicas = failedReplicas; } Set<URI> getRegionsContacted() { return regionsContacted; } void setRegionsContacted(Set<URI> regionsContacted) { this.regionsContacted = regionsContacted; } private static String formatDateTime(ZonedDateTime dateTime) { if (dateTime == null) { return null; } return dateTime.format(responseTimeFormatter); } private class StoreResponseStatistics { private ZonedDateTime requestResponseTime; private StoreResult storeResult; private ResourceType requestResourceType; private OperationType requestOperationType; @Override public String toString() { return "StoreResponseStatistics{" + "requestResponseTime=\"" + formatDateTime(requestResponseTime) + "\"" + ", storeResult=" + storeResult + ", requestResourceType=" + requestResourceType + ", requestOperationType=" + requestOperationType + '}'; } } private class AddressResolutionStatistics { private ZonedDateTime startTime; private ZonedDateTime endTime; private String targetEndpoint; AddressResolutionStatistics() { } @Override public String toString() { return "AddressResolutionStatistics{" + "startTime=\"" + formatDateTime(startTime) + "\"" + ", endTime=\"" + formatDateTime(endTime) + "\"" + ", targetEndpoint='" + targetEndpoint + '\'' + '}'; } } private class GatewayStatistic { private OperationType operationType; private int statusCode; private int subStatusCode; private ISessionToken sessionToken; private double requestCharge; public String toString() { return "Gateway statistics{ " + "Operation Type : " + this.operationType + "Status Code : " + this.statusCode + "Substatus Code : " + this.statusCode + "Session Token : " + (this.sessionToken != null ? this.sessionToken.convertToString() : null) + "Request Charge : " + requestCharge + '}'; } } }
class ClientSideRequestStatistics { private final static int MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING = 10; private final static DateTimeFormatter responseTimeFormatter = DateTimeFormatter.ofPattern("dd MMM yyyy HH:mm:ss.SSS").withLocale(Locale.US); private final static OperatingSystemMXBean mbean = (com.sun.management.OperatingSystemMXBean)ManagementFactory.getOperatingSystemMXBean(); private ZonedDateTime requestStartTime; private ZonedDateTime requestEndTime; private ConnectionMode connectionMode; private List<StoreResponseStatistics> responseStatisticsList; private List<StoreResponseStatistics> supplementalResponseStatisticsList; private Map<String, AddressResolutionStatistics> addressResolutionStatistics; private GatewayStatistic gatewayStatistic; private List<URI> contactedReplicas; private Set<URI> failedReplicas; private Set<URI> regionsContacted; ClientSideRequestStatistics() { this.requestStartTime = ZonedDateTime.now(ZoneOffset.UTC); this.requestEndTime = ZonedDateTime.now(ZoneOffset.UTC); this.responseStatisticsList = new ArrayList<>(); this.supplementalResponseStatisticsList = new ArrayList<>(); this.addressResolutionStatistics = new HashMap<>(); this.contactedReplicas = new ArrayList<>(); this.failedReplicas = new HashSet<>(); this.regionsContacted = new HashSet<>(); this.connectionMode = ConnectionMode.DIRECT; } Duration getRequestLatency() { return Duration.between(requestStartTime, requestEndTime); } private boolean isCPUOverloaded() { return false; } void recordResponse(RxDocumentServiceRequest request, StoreResult storeResult) { ZonedDateTime responseTime = ZonedDateTime.now(ZoneOffset.UTC); connectionMode = ConnectionMode.DIRECT; StoreResponseStatistics storeResponseStatistics = new StoreResponseStatistics(); storeResponseStatistics.requestResponseTime = responseTime; storeResponseStatistics.storeResult = storeResult; storeResponseStatistics.requestOperationType = request.getOperationType(); storeResponseStatistics.requestResourceType = request.getResourceType(); URI locationEndPoint = null; if (request.requestContext.locationEndpointToRoute != null) { try { locationEndPoint = request.requestContext.locationEndpointToRoute.toURI(); } catch (URISyntaxException e) { throw new IllegalArgumentException(e); } } synchronized (this) { if (responseTime.isAfter(this.requestEndTime)) { this.requestEndTime = responseTime; } if (locationEndPoint != null) { this.regionsContacted.add(locationEndPoint); } if (storeResponseStatistics.requestOperationType == OperationType.Head || storeResponseStatistics.requestOperationType == OperationType.HeadFeed) { this.supplementalResponseStatisticsList.add(storeResponseStatistics); } else { this.responseStatisticsList.add(storeResponseStatistics); } } } void recordGatewayResponse(RxDocumentServiceRequest rxDocumentServiceRequest, StoreResponse storeResponse, CosmosClientException exception) { ZonedDateTime responseTime = ZonedDateTime.now(ZoneOffset.UTC); connectionMode = ConnectionMode.GATEWAY; synchronized (this) { if (responseTime.isAfter(this.requestEndTime)) { this.requestEndTime = responseTime; } this.gatewayStatistic = new GatewayStatistic(); this.gatewayStatistic.operationType = rxDocumentServiceRequest.getOperationType(); if (storeResponse != null) { this.gatewayStatistic.statusCode = storeResponse.getStatus(); this.gatewayStatistic.subStatusCode = DirectBridgeInternal.getSubStatusCode(storeResponse); this.gatewayStatistic.sessionToken = storeResponse.getHeaderValue(HttpConstants.HttpHeaders.SESSION_TOKEN); 
this.gatewayStatistic.requestCharge = storeResponse.getHeaderValue(HttpConstants.HttpHeaders.REQUEST_CHARGE); } else if(exception != null){ this.gatewayStatistic.statusCode = exception.getStatusCode(); this.gatewayStatistic.subStatusCode = exception.getSubStatusCode(); } } } String recordAddressResolutionStart(URI targetEndpoint) { String identifier = Utils.randomUUID().toString(); AddressResolutionStatistics resolutionStatistics = new AddressResolutionStatistics(); resolutionStatistics.startTime = ZonedDateTime.now(ZoneOffset.UTC); resolutionStatistics.endTime = ZonedDateTime.of(LocalDateTime.MAX, ZoneOffset.UTC); resolutionStatistics.targetEndpoint = targetEndpoint == null ? "<NULL>" : targetEndpoint.toString(); synchronized (this) { this.addressResolutionStatistics.put(identifier, resolutionStatistics); } return identifier; } void recordAddressResolutionEnd(String identifier) { if (StringUtils.isEmpty(identifier)) { return; } ZonedDateTime responseTime = ZonedDateTime.now(ZoneOffset.UTC); synchronized (this) { if (!this.addressResolutionStatistics.containsKey(identifier)) { throw new IllegalArgumentException("Identifier " + identifier + " does not exist. Please call start before calling end"); } if (responseTime.isAfter(this.requestEndTime)) { this.requestEndTime = responseTime; } AddressResolutionStatistics resolutionStatistics = this.addressResolutionStatistics.get(identifier); resolutionStatistics.endTime = responseTime; } } @Override public String toString() { StringBuilder stringBuilder = new StringBuilder(); synchronized (this) { stringBuilder.append("RequestStartTime: ") .append("\"").append(this.requestStartTime.format(responseTimeFormatter)).append("\"") .append(", ") .append("RequestEndTime: ") .append("\"").append(this.requestEndTime.format(responseTimeFormatter)).append("\"") .append(", ") .append("Duration: ") .append(Duration.between(requestStartTime, requestEndTime).toMillis()) .append(" ms, ") .append("Connection Mode : " + this.connectionMode.toString() + ", ") .append("NUMBER of regions attempted: ") .append(this.regionsContacted.isEmpty() ? 1 : this.regionsContacted.size()) .append(System.lineSeparator()); for (StoreResponseStatistics storeResponseStatistics : this.responseStatisticsList) { stringBuilder.append(storeResponseStatistics.toString()).append(System.lineSeparator()); } for (AddressResolutionStatistics value : this.addressResolutionStatistics.values()) { stringBuilder.append(value.toString()).append(System.lineSeparator()); } int supplementalResponseStatisticsListCount = this.supplementalResponseStatisticsList.size(); int initialIndex = Math.max(supplementalResponseStatisticsListCount - MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING, 0); if (initialIndex != 0) { stringBuilder.append(" -- Displaying only the last ") .append(MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING) .append(" head/headfeed requests. 
Total head/headfeed requests: ") .append(supplementalResponseStatisticsListCount); } for (int i = initialIndex; i < supplementalResponseStatisticsListCount; i++) { stringBuilder.append(this.supplementalResponseStatisticsList.get(i).toString()).append(System.lineSeparator()); } if (this.gatewayStatistic != null) { stringBuilder.append(this.gatewayStatistic).append(System.lineSeparator()); } printSystemInformation(stringBuilder); } String requestStatsString = stringBuilder.toString(); if (!requestStatsString.isEmpty()) { return System.lineSeparator() + requestStatsString; } return StringUtils.EMPTY; } List<URI> getContactedReplicas() { return contactedReplicas; } void setContactedReplicas(List<URI> contactedReplicas) { this.contactedReplicas = contactedReplicas; } Set<URI> getFailedReplicas() { return failedReplicas; } void setFailedReplicas(Set<URI> failedReplicas) { this.failedReplicas = failedReplicas; } Set<URI> getRegionsContacted() { return regionsContacted; } void setRegionsContacted(Set<URI> regionsContacted) { this.regionsContacted = regionsContacted; } private static String formatDateTime(ZonedDateTime dateTime) { if (dateTime == null) { return null; } return dateTime.format(responseTimeFormatter); } private class StoreResponseStatistics { private ZonedDateTime requestResponseTime; private StoreResult storeResult; private ResourceType requestResourceType; private OperationType requestOperationType; @Override public String toString() { return "StoreResponseStatistics{" + "requestResponseTime=\"" + formatDateTime(requestResponseTime) + "\"" + ", storeResult=" + storeResult + ", requestResourceType=" + requestResourceType + ", requestOperationType=" + requestOperationType + '}'; } } private class AddressResolutionStatistics { private ZonedDateTime startTime; private ZonedDateTime endTime; private String targetEndpoint; AddressResolutionStatistics() { } @Override public String toString() { return "AddressResolutionStatistics{" + "startTime=\"" + formatDateTime(startTime) + "\"" + ", endTime=\"" + formatDateTime(endTime) + "\"" + ", targetEndpoint='" + targetEndpoint + '\'' + '}'; } } private class GatewayStatistic { private OperationType operationType; private int statusCode; private int subStatusCode; private String sessionToken; private String requestCharge; public String toString() { return "Gateway statistics{ " + "Operation Type : " + this.operationType + "Status Code : " + this.statusCode + "Substatus Code : " + this.statusCode + "Session Token : " + this.sessionToken + "Request Charge : " + requestCharge + '}'; } } }
The diff you posted shows modifications to a Java snippet, likely taken from a version control system such as git. Observations and suggestions:

1. **Alignment and Indentation:** The alignment correction for the string literals improves readability.
2. **Acquiring Locks:** The original code acquires a read lock on the database object before iterating over its tables; the new version drops it, which raises concurrency concerns. **Important**: if the justification for removing the lock (the loop only reads tables and their immutable fields) is valid, this may be fine, but verify it carefully; other threads mutating the tables while the loop runs could cause race conditions. A hedged sketch of the locked variant follows this comment.
3. **Boxed `Long` Comparison:** `((Long) m.getValue()).longValue() == 0L` was changed to `(Long) m.getValue() == 0L`. Note that both forms unbox the `Long` (the second via auto-unboxing), so the change is stylistic rather than a performance win; either form would throw a `NullPointerException` on a null value, which the preceding null check guards against.
4. **Unnecessary Code:** Removing the lock can make sense, but it depends heavily on concurrent access patterns that are not visible in the snippet. If thread safety is otherwise guaranteed, the removal reduces potential contention overhead.
5. **Comment Clarity:** An explanatory comment states that the lock was removed because the function only reads tables and their immutable fields. Keeping such explanations is good practice when changing significant behavior like lock handling, but the maintaining team must fully understand the implications.
6. **Concurrency Assurance:** If `TableMetricsRegistry.getInstance().getMetricsEntity(tableId)` and the loop body are thread-safe, and databases are mutated infrequently, the decision could stand. Make sure everyone working on this code understands the changed concurrency assumptions.
7. **General Concerns:** Check whether any tests that assert concurrency integrity still pass after this change, to better gauge its impact.
8. **Refactoring Suggestions:** If `collectTableMetrics` runs frequently under high load, removing the lock could indeed improve performance; profiling can confirm whether lock acquisition was a bottleneck. A more granular or optimistic locking strategy is an alternative if acquisition overhead is the main concern.

Use these points as guidelines and add unit and integration tests to verify correctness. Concurrency-related issues can be subtle and hard to track down, so careful testing is paramount.
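A minimal sketch of the locked variant from point 2, assuming `Database` exposes `readLock()`/`readUnlock()` as older StarRocks versions do; the helper class and method names are illustrative.

```java
import java.util.function.Consumer;

// Hedged sketch: re-wrap the table iteration in the database read lock if
// it must be isolated from concurrent DDL. readLock()/readUnlock() are
// assumed from older StarRocks Database versions.
final class LockedTableVisitor {
    static void visitTables(Database db, Consumer<Table> visit) {
        db.readLock();
        try {
            for (Table table : db.getTables()) {
                visit.accept(table);
            }
        } finally {
            db.readUnlock();
        }
    }
}
```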
private static void collectTableMetrics(MetricVisitor visitor, boolean minifyTableMetrics) { GlobalStateMgr globalStateMgr = GlobalStateMgr.getCurrentState(); List<String> dbNames = globalStateMgr.getDbNames(); for (String dbName : dbNames) { Database db = GlobalStateMgr.getCurrentState().getDb(dbName); if (null == db) { continue; } for (Table table : db.getTables()) { long tableId = table.getId(); String tableName = table.getName(); TableMetricsEntity entity = TableMetricsRegistry.getInstance().getMetricsEntity(tableId); for (Metric m : entity.getMetrics()) { if (minifyTableMetrics && (null == m.getValue() || (MetricType.COUNTER == m.type && (Long) m.getValue() == 0L))) { continue; } m.addLabel(new MetricLabel("db_name", dbName)) .addLabel(new MetricLabel("tbl_name", tableName)) .addLabel(new MetricLabel("tbl_id", String.valueOf(tableId))); visitor.visit(m); } } } }
}
private static void collectTableMetrics(MetricVisitor visitor, boolean minifyTableMetrics) { GlobalStateMgr globalStateMgr = GlobalStateMgr.getCurrentState(); List<String> dbNames = globalStateMgr.getDbNames(); for (String dbName : dbNames) { Database db = GlobalStateMgr.getCurrentState().getDb(dbName); if (null == db) { continue; } for (Table table : db.getTables()) { long tableId = table.getId(); String tableName = table.getName(); TableMetricsEntity entity = TableMetricsRegistry.getInstance().getMetricsEntity(tableId); for (Metric m : entity.getMetrics()) { if (minifyTableMetrics && (null == m.getValue() || (MetricType.COUNTER == m.type && (Long) m.getValue() == 0L))) { continue; } m.addLabel(new MetricLabel("db_name", dbName)) .addLabel(new MetricLabel("tbl_name", tableName)) .addLabel(new MetricLabel("tbl_id", String.valueOf(tableId))); visitor.visit(m); } } } }
class MetricRepo { private static final Logger LOG = LogManager.getLogger(MetricRepo.class); private static final MetricRegistry METRIC_REGISTER = new MetricRegistry(); private static final StarRocksMetricRegistry STARROCKS_METRIC_REGISTER = new StarRocksMetricRegistry(); public static volatile boolean isInit = false; public static final SystemMetrics SYSTEM_METRICS = new SystemMetrics(); public static final String TABLET_NUM = "tablet_num"; public static final String TABLET_MAX_COMPACTION_SCORE = "tablet_max_compaction_score"; public static LongCounterMetric COUNTER_REQUEST_ALL; public static LongCounterMetric COUNTER_QUERY_ALL; public static LongCounterMetric COUNTER_QUERY_ERR; public static LongCounterMetric COUNTER_QUERY_TIMEOUT; public static LongCounterMetric COUNTER_QUERY_SUCCESS; public static LongCounterMetric COUNTER_SLOW_QUERY; public static LongCounterMetric COUNTER_QUERY_QUEUE_PENDING; public static LongCounterMetric COUNTER_QUERY_QUEUE_TOTAL; public static LongCounterMetric COUNTER_QUERY_QUEUE_TIMEOUT; public static LongCounterMetric COUNTER_UNFINISHED_BACKUP_JOB; public static LongCounterMetric COUNTER_UNFINISHED_RESTORE_JOB; public static LongCounterMetric COUNTER_LOAD_ADD; public static LongCounterMetric COUNTER_LOAD_FINISHED; public static LongCounterMetric COUNTER_EDIT_LOG_WRITE; public static LongCounterMetric COUNTER_EDIT_LOG_READ; public static LongCounterMetric COUNTER_EDIT_LOG_SIZE_BYTES; public static LongCounterMetric COUNTER_IMAGE_WRITE; public static LongCounterMetric COUNTER_IMAGE_PUSH; public static LongCounterMetric COUNTER_TXN_REJECT; public static LongCounterMetric COUNTER_TXN_BEGIN; public static LongCounterMetric COUNTER_TXN_FAILED; public static LongCounterMetric COUNTER_TXN_SUCCESS; public static LongCounterMetric COUNTER_ROUTINE_LOAD_ROWS; public static LongCounterMetric COUNTER_ROUTINE_LOAD_RECEIVED_BYTES; public static LongCounterMetric COUNTER_ROUTINE_LOAD_ERROR_ROWS; public static LongCounterMetric COUNTER_ROUTINE_LOAD_PAUSED; public static Histogram HISTO_QUERY_LATENCY; public static Histogram HISTO_EDIT_LOG_WRITE_LATENCY; public static Histogram HISTO_JOURNAL_WRITE_LATENCY; public static Histogram HISTO_JOURNAL_WRITE_BATCH; public static Histogram HISTO_JOURNAL_WRITE_BYTES; public static GaugeMetricImpl<Double> GAUGE_QUERY_PER_SECOND; public static GaugeMetricImpl<Double> GAUGE_REQUEST_PER_SECOND; public static GaugeMetricImpl<Double> GAUGE_QUERY_ERR_RATE; public static GaugeMetricImpl<Double> GAUGE_QUERY_LATENCY_MEAN; public static GaugeMetricImpl<Double> GAUGE_QUERY_LATENCY_MEDIAN; public static GaugeMetricImpl<Double> GAUGE_QUERY_LATENCY_P75; public static GaugeMetricImpl<Double> GAUGE_QUERY_LATENCY_P90; public static GaugeMetricImpl<Double> GAUGE_QUERY_LATENCY_P95; public static GaugeMetricImpl<Double> GAUGE_QUERY_LATENCY_P99; public static GaugeMetricImpl<Double> GAUGE_QUERY_LATENCY_P999; public static GaugeMetricImpl<Long> GAUGE_MAX_TABLET_COMPACTION_SCORE; public static GaugeMetricImpl<Long> GAUGE_STACKED_JOURNAL_NUM; public static List<GaugeMetricImpl<Long>> GAUGE_ROUTINE_LOAD_LAGS; public static GaugeMetricImpl<Integer> GAUGE_SAFE_MODE; private static final ScheduledThreadPoolExecutor METRIC_TIMER = ThreadPoolManager.newDaemonScheduledThreadPool(1, "Metric-Timer-Pool", true); private static final MetricCalculator METRIC_CALCULATOR = new MetricCalculator(); public static synchronized void init() { if (isInit) { return; } GAUGE_ROUTINE_LOAD_LAGS = new ArrayList<>(); LoadMgr loadManger = GlobalStateMgr.getCurrentState().getLoadMgr(); for 
(EtlJobType jobType : EtlJobType.values()) { if (jobType == EtlJobType.MINI || jobType == EtlJobType.UNKNOWN) { continue; } for (JobState state : JobState.values()) { GaugeMetric<Long> gauge = new GaugeMetric<Long>("job", MetricUnit.NOUNIT, "job statistics") { @Override public Long getValue() { if (!GlobalStateMgr.getCurrentState().isLeader()) { return 0L; } return loadManager.getLoadJobNum(state, jobType); } }; gauge.addLabel(new MetricLabel("job", "load")) .addLabel(new MetricLabel("type", jobType.name())) .addLabel(new MetricLabel("state", state.name())); STARROCKS_METRIC_REGISTER.addMetric(gauge); } } AlterJobMgr alter = GlobalStateMgr.getCurrentState().getAlterJobMgr(); for (AlterJobV2.JobType jobType : AlterJobV2.JobType.values()) { if (jobType != AlterJobV2.JobType.SCHEMA_CHANGE && jobType != AlterJobV2.JobType.ROLLUP) { continue; } GaugeMetric<Long> gauge = new GaugeMetric<Long>("job", MetricUnit.NOUNIT, "job statistics") { @Override public Long getValue() { if (!GlobalStateMgr.getCurrentState().isLeader()) { return 0L; } if (jobType == AlterJobV2.JobType.SCHEMA_CHANGE) { return alter.getSchemaChangeHandler() .getAlterJobV2Num(AlterJobV2.JobState.RUNNING); } else { return alter.getMaterializedViewHandler() .getAlterJobV2Num(AlterJobV2.JobState.RUNNING); } } }; gauge.addLabel(new MetricLabel("job", "alter")) .addLabel(new MetricLabel("type", jobType.name())) .addLabel(new MetricLabel("state", "running")); STARROCKS_METRIC_REGISTER.addMetric(gauge); } generateBackendsTabletMetrics(); GaugeMetric<Integer> connections = new GaugeMetric<Integer>( "connection_total", MetricUnit.CONNECTIONS, "total connections") { @Override public Integer getValue() { return ExecuteEnv.getInstance().getScheduler().getConnectionNum(); } }; STARROCKS_METRIC_REGISTER.addMetric(connections); GaugeMetric<Long> maxJournalId = (GaugeMetric<Long>) new GaugeMetric<Long>( "max_journal_id", MetricUnit.NOUNIT, "max journal id of this frontend") { @Override public Long getValue() { return GlobalStateMgr.getCurrentState().getMaxJournalId(); } }; STARROCKS_METRIC_REGISTER.addMetric(maxJournalId); GaugeMetric<Long> metaLogCount = new GaugeMetric<Long>( "meta_log_count", MetricUnit.NOUNIT, "meta log total count") { @Override public Long getValue() { return GlobalStateMgr.getCurrentState().getMaxJournalId() - GlobalStateMgr.getCurrentState().getImageJournalId(); } }; STARROCKS_METRIC_REGISTER.addMetric(metaLogCount); GaugeMetric<Long> scheduledTabletNum = (GaugeMetric<Long>) new GaugeMetric<Long>( "scheduled_tablet_num", MetricUnit.NOUNIT, "number of tablets being scheduled") { @Override public Long getValue() { if (!GlobalStateMgr.getCurrentState().isLeader()) { return 0L; } return (long) GlobalStateMgr.getCurrentState().getTabletScheduler().getTotalNum(); } }; STARROCKS_METRIC_REGISTER.addMetric(scheduledTabletNum); RoutineLoadMgr routineLoadManager = GlobalStateMgr.getCurrentState().getRoutineLoadMgr(); for (RoutineLoadJob.JobState state : RoutineLoadJob.JobState.values()) { GaugeMetric<Long> gauge = new GaugeMetric<Long>("routine_load_jobs", MetricUnit.NOUNIT, "routine load jobs") { @Override public Long getValue() { if (null == routineLoadManager) { return 0L; } return (long) routineLoadManager.getRoutineLoadJobByState(Sets.newHashSet(state)).size(); } }; gauge.addLabel(new MetricLabel("state", state.name())); STARROCKS_METRIC_REGISTER.addMetric(gauge); } GAUGE_QUERY_PER_SECOND = new GaugeMetricImpl<>("qps", MetricUnit.NOUNIT, "query per second"); GAUGE_QUERY_PER_SECOND.setValue(0.0);
STARROCKS_METRIC_REGISTER.addMetric(GAUGE_QUERY_PER_SECOND); GAUGE_REQUEST_PER_SECOND = new GaugeMetricImpl<>("rps", MetricUnit.NOUNIT, "request per second"); GAUGE_REQUEST_PER_SECOND.setValue(0.0); STARROCKS_METRIC_REGISTER.addMetric(GAUGE_REQUEST_PER_SECOND); GAUGE_QUERY_ERR_RATE = new GaugeMetricImpl<>("query_err_rate", MetricUnit.NOUNIT, "query error rate"); GAUGE_QUERY_ERR_RATE.setValue(0.0); STARROCKS_METRIC_REGISTER.addMetric(GAUGE_QUERY_ERR_RATE); GAUGE_MAX_TABLET_COMPACTION_SCORE = new GaugeMetricImpl<>("max_tablet_compaction_score", MetricUnit.NOUNIT, "max tablet compaction score of all backends"); GAUGE_MAX_TABLET_COMPACTION_SCORE.setValue(0L); STARROCKS_METRIC_REGISTER.addMetric(GAUGE_MAX_TABLET_COMPACTION_SCORE); GAUGE_STACKED_JOURNAL_NUM = new GaugeMetricImpl<>( "editlog_stacked_num", MetricUnit.OPERATIONS, "counter of edit log that are stacked"); GAUGE_STACKED_JOURNAL_NUM.setValue(0L); STARROCKS_METRIC_REGISTER.addMetric(GAUGE_STACKED_JOURNAL_NUM); GAUGE_QUERY_LATENCY_MEAN = new GaugeMetricImpl<>("query_latency", MetricUnit.MILLISECONDS, "mean of query latency"); GAUGE_QUERY_LATENCY_MEAN.addLabel(new MetricLabel("type", "mean")); GAUGE_QUERY_LATENCY_MEAN.setValue(0.0); STARROCKS_METRIC_REGISTER.addMetric(GAUGE_QUERY_LATENCY_MEAN); GAUGE_QUERY_LATENCY_MEDIAN = new GaugeMetricImpl<>("query_latency", MetricUnit.MILLISECONDS, "median of query latency"); GAUGE_QUERY_LATENCY_MEDIAN.addLabel(new MetricLabel("type", "50_quantile")); GAUGE_QUERY_LATENCY_MEDIAN.setValue(0.0); STARROCKS_METRIC_REGISTER.addMetric(GAUGE_QUERY_LATENCY_MEDIAN); GAUGE_QUERY_LATENCY_P75 = new GaugeMetricImpl<>("query_latency", MetricUnit.MILLISECONDS, "p75 of query latency"); GAUGE_QUERY_LATENCY_P75.addLabel(new MetricLabel("type", "75_quantile")); GAUGE_QUERY_LATENCY_P75.setValue(0.0); STARROCKS_METRIC_REGISTER.addMetric(GAUGE_QUERY_LATENCY_P75); GAUGE_QUERY_LATENCY_P90 = new GaugeMetricImpl<>("query_latency", MetricUnit.MILLISECONDS, "p90 of query latency"); GAUGE_QUERY_LATENCY_P90.addLabel(new MetricLabel("type", "90_quantile")); GAUGE_QUERY_LATENCY_P90.setValue(0.0); STARROCKS_METRIC_REGISTER.addMetric(GAUGE_QUERY_LATENCY_P90); GAUGE_QUERY_LATENCY_P95 = new GaugeMetricImpl<>("query_latency", MetricUnit.MILLISECONDS, "p95 of query latency"); GAUGE_QUERY_LATENCY_P95.addLabel(new MetricLabel("type", "95_quantile")); GAUGE_QUERY_LATENCY_P95.setValue(0.0); STARROCKS_METRIC_REGISTER.addMetric(GAUGE_QUERY_LATENCY_P95); GAUGE_QUERY_LATENCY_P99 = new GaugeMetricImpl<>("query_latency", MetricUnit.MILLISECONDS, "p99 of query latency"); GAUGE_QUERY_LATENCY_P99.addLabel(new MetricLabel("type", "99_quantile")); GAUGE_QUERY_LATENCY_P99.setValue(0.0); STARROCKS_METRIC_REGISTER.addMetric(GAUGE_QUERY_LATENCY_P99); GAUGE_QUERY_LATENCY_P999 = new GaugeMetricImpl<>("query_latency", MetricUnit.MILLISECONDS, "p999 of query latency"); GAUGE_QUERY_LATENCY_P999.addLabel(new MetricLabel("type", "999_quantile")); GAUGE_QUERY_LATENCY_P999.setValue(0.0); STARROCKS_METRIC_REGISTER.addMetric(GAUGE_QUERY_LATENCY_P999); GAUGE_SAFE_MODE = new GaugeMetricImpl<>("safe_mode", MetricUnit.NOUNIT, "safe mode flag"); GAUGE_SAFE_MODE.addLabel(new MetricLabel("type", "safe_mode")); GAUGE_SAFE_MODE.setValue(0); STARROCKS_METRIC_REGISTER.addMetric(GAUGE_SAFE_MODE); COUNTER_REQUEST_ALL = new LongCounterMetric("request_total", MetricUnit.REQUESTS, "total request"); STARROCKS_METRIC_REGISTER.addMetric(COUNTER_REQUEST_ALL); COUNTER_QUERY_ALL = new LongCounterMetric("query_total", MetricUnit.REQUESTS, "total query"); 
STARROCKS_METRIC_REGISTER.addMetric(COUNTER_QUERY_ALL); COUNTER_QUERY_ERR = new LongCounterMetric("query_err", MetricUnit.REQUESTS, "total error query"); STARROCKS_METRIC_REGISTER.addMetric(COUNTER_QUERY_ERR); COUNTER_QUERY_TIMEOUT = new LongCounterMetric("query_timeout", MetricUnit.REQUESTS, "total timeout query"); STARROCKS_METRIC_REGISTER.addMetric(COUNTER_QUERY_TIMEOUT); COUNTER_QUERY_SUCCESS = new LongCounterMetric("query_success", MetricUnit.REQUESTS, "total success query"); STARROCKS_METRIC_REGISTER.addMetric(COUNTER_QUERY_SUCCESS); COUNTER_SLOW_QUERY = new LongCounterMetric("slow_query", MetricUnit.REQUESTS, "total slow query"); STARROCKS_METRIC_REGISTER.addMetric(COUNTER_SLOW_QUERY); COUNTER_QUERY_QUEUE_PENDING = new LongCounterMetric("query_queue_pending", MetricUnit.REQUESTS, "total pending query"); STARROCKS_METRIC_REGISTER.addMetric(COUNTER_QUERY_QUEUE_PENDING); COUNTER_QUERY_QUEUE_TOTAL = new LongCounterMetric("query_queue_total", MetricUnit.REQUESTS, "total history queued query"); STARROCKS_METRIC_REGISTER.addMetric(COUNTER_QUERY_QUEUE_TOTAL); COUNTER_QUERY_QUEUE_TIMEOUT = new LongCounterMetric("query_queue_timeout", MetricUnit.REQUESTS, "total history query for timeout in queue"); STARROCKS_METRIC_REGISTER.addMetric(COUNTER_QUERY_QUEUE_TIMEOUT); COUNTER_LOAD_ADD = new LongCounterMetric("load_add", MetricUnit.REQUESTS, "total load submit"); STARROCKS_METRIC_REGISTER.addMetric(COUNTER_LOAD_ADD); COUNTER_ROUTINE_LOAD_PAUSED = new LongCounterMetric("routine_load_paused", MetricUnit.REQUESTS, "counter of routine load paused"); STARROCKS_METRIC_REGISTER.addMetric(COUNTER_ROUTINE_LOAD_PAUSED); COUNTER_LOAD_FINISHED = new LongCounterMetric("load_finished", MetricUnit.REQUESTS, "total load finished"); STARROCKS_METRIC_REGISTER.addMetric(COUNTER_LOAD_FINISHED); COUNTER_EDIT_LOG_WRITE = new LongCounterMetric("edit_log_write", MetricUnit.OPERATIONS, "counter of edit log write into bdbje"); STARROCKS_METRIC_REGISTER.addMetric(COUNTER_EDIT_LOG_WRITE); COUNTER_EDIT_LOG_READ = new LongCounterMetric("edit_log_read", MetricUnit.OPERATIONS, "counter of edit log read from bdbje"); STARROCKS_METRIC_REGISTER.addMetric(COUNTER_EDIT_LOG_READ); COUNTER_EDIT_LOG_SIZE_BYTES = new LongCounterMetric("edit_log_size_bytes", MetricUnit.BYTES, "size of edit log"); STARROCKS_METRIC_REGISTER.addMetric(COUNTER_EDIT_LOG_SIZE_BYTES); COUNTER_IMAGE_WRITE = new LongCounterMetric("image_write", MetricUnit.OPERATIONS, "counter of image generated"); STARROCKS_METRIC_REGISTER.addMetric(COUNTER_IMAGE_WRITE); COUNTER_IMAGE_PUSH = new LongCounterMetric("image_push", MetricUnit.OPERATIONS, "counter of image succeeded in pushing to other frontends"); STARROCKS_METRIC_REGISTER.addMetric(COUNTER_IMAGE_PUSH); COUNTER_TXN_REJECT = new LongCounterMetric("txn_reject", MetricUnit.REQUESTS, "counter of rejected transactions"); STARROCKS_METRIC_REGISTER.addMetric(COUNTER_TXN_REJECT); COUNTER_TXN_BEGIN = new LongCounterMetric("txn_begin", MetricUnit.REQUESTS, "counter of beginning transactions"); STARROCKS_METRIC_REGISTER.addMetric(COUNTER_TXN_BEGIN); COUNTER_TXN_SUCCESS = new LongCounterMetric("txn_success", MetricUnit.REQUESTS, "counter of successful transactions"); STARROCKS_METRIC_REGISTER.addMetric(COUNTER_TXN_SUCCESS); COUNTER_TXN_FAILED = new LongCounterMetric("txn_failed", MetricUnit.REQUESTS, "counter of failed transactions"); STARROCKS_METRIC_REGISTER.addMetric(COUNTER_TXN_FAILED); COUNTER_ROUTINE_LOAD_ROWS = new LongCounterMetric("routine_load_rows", MetricUnit.ROWS, "total rows of routine load");
STARROCKS_METRIC_REGISTER.addMetric(COUNTER_ROUTINE_LOAD_ROWS); COUNTER_ROUTINE_LOAD_RECEIVED_BYTES = new LongCounterMetric("routine_load_receive_bytes", MetricUnit.BYTES, "total received bytes of routine load"); STARROCKS_METRIC_REGISTER.addMetric(COUNTER_ROUTINE_LOAD_RECEIVED_BYTES); COUNTER_ROUTINE_LOAD_ERROR_ROWS = new LongCounterMetric("routine_load_error_rows", MetricUnit.ROWS, "total error rows of routine load"); STARROCKS_METRIC_REGISTER.addMetric(COUNTER_ROUTINE_LOAD_ERROR_ROWS); COUNTER_UNFINISHED_BACKUP_JOB = new LongCounterMetric("unfinished_backup_job", MetricUnit.REQUESTS, "current unfinished backup job"); STARROCKS_METRIC_REGISTER.addMetric(COUNTER_UNFINISHED_BACKUP_JOB); COUNTER_UNFINISHED_RESTORE_JOB = new LongCounterMetric("unfinished_restore_job", MetricUnit.REQUESTS, "current unfinished restore job"); STARROCKS_METRIC_REGISTER.addMetric(COUNTER_UNFINISHED_RESTORE_JOB); List<Database> dbs = Lists.newArrayList(); if (GlobalStateMgr.getCurrentState().getIdToDb() != null) { for (Map.Entry<Long, Database> entry : GlobalStateMgr.getCurrentState().getIdToDb().entrySet()) { dbs.add(entry.getValue()); } for (Database db : dbs) { AbstractJob jobI = GlobalStateMgr.getCurrentState().getBackupHandler().getJob(db.getId()); if (jobI instanceof BackupJob && !((BackupJob) jobI).isDone()) { COUNTER_UNFINISHED_BACKUP_JOB.increase(1L); } else if (jobI instanceof RestoreJob && !((RestoreJob) jobI).isDone()) { COUNTER_UNFINISHED_RESTORE_JOB.increase(1L); } } } HISTO_QUERY_LATENCY = METRIC_REGISTER.histogram(MetricRegistry.name("query", "latency", "ms")); HISTO_EDIT_LOG_WRITE_LATENCY = METRIC_REGISTER.histogram(MetricRegistry.name("editlog", "write", "latency", "ms")); HISTO_JOURNAL_WRITE_LATENCY = METRIC_REGISTER.histogram(MetricRegistry.name("journal", "write", "latency", "ms")); HISTO_JOURNAL_WRITE_BATCH = METRIC_REGISTER.histogram(MetricRegistry.name("journal", "write", "batch")); HISTO_JOURNAL_WRITE_BYTES = METRIC_REGISTER.histogram(MetricRegistry.name("journal", "write", "bytes")); initSystemMetrics(); initMemoryMetrics(); updateMetrics(); isInit = true; if (Config.enable_metric_calculator) { METRIC_TIMER.scheduleAtFixedRate(METRIC_CALCULATOR, 0, 15 * 1000L, TimeUnit.MILLISECONDS); } } private static void initSystemMetrics() { GaugeMetric<Long> tcpRetransSegs = (GaugeMetric<Long>) new GaugeMetric<Long>( "snmp", MetricUnit.NOUNIT, "All TCP packets retransmitted") { @Override public Long getValue() { return SYSTEM_METRICS.tcpRetransSegs; } }; tcpRetransSegs.addLabel(new MetricLabel("name", "tcp_retrans_segs")); STARROCKS_METRIC_REGISTER.addMetric(tcpRetransSegs); GaugeMetric<Long> tpcInErrs = (GaugeMetric<Long>) new GaugeMetric<Long>( "snmp", MetricUnit.NOUNIT, "The number of all problematic TCP packets received") { @Override public Long getValue() { return SYSTEM_METRICS.tcpInErrs; } }; tpcInErrs.addLabel(new MetricLabel("name", "tcp_in_errs")); STARROCKS_METRIC_REGISTER.addMetric(tpcInErrs); GaugeMetric<Long> tpcInSegs = (GaugeMetric<Long>) new GaugeMetric<Long>( "snmp", MetricUnit.NOUNIT, "The number of all TCP packets received") { @Override public Long getValue() { return SYSTEM_METRICS.tcpInSegs; } }; tpcInSegs.addLabel(new MetricLabel("name", "tcp_in_segs")); STARROCKS_METRIC_REGISTER.addMetric(tpcInSegs); GaugeMetric<Long> tpcOutSegs = (GaugeMetric<Long>) new GaugeMetric<Long>( "snmp", MetricUnit.NOUNIT, "The number of all TCP packets sent with RST") { @Override public Long getValue() { return SYSTEM_METRICS.tcpOutSegs; } }; tpcOutSegs.addLabel(new MetricLabel("name",
"tcp_out_segs")); STARROCKS_METRIC_REGISTER.addMetric(tpcOutSegs); } public static void initMemoryMetrics() { GaugeMetric<Long> tabletCnt = new GaugeMetric<Long>("memory", MetricUnit.NOUNIT, "The count of tablets") { @Override public Long getValue() { return GlobalStateMgr.getCurrentInvertedIndex().getTabletCount(); } }; tabletCnt.addLabel(new MetricLabel("type", "tablet_count")); STARROCKS_METRIC_REGISTER.addMetric(tabletCnt); GaugeMetric<Long> tabletBytes = new GaugeMetric<Long>("memory", MetricUnit.BYTES, "The bytes of tablets") { @Override public Long getValue() { return GlobalStateMgr.getCurrentInvertedIndex().getTabletCount() * SizeEstimator.estimate(new LocalTablet()); } }; tabletBytes.addLabel(new MetricLabel("type", "tablet_bytes")); STARROCKS_METRIC_REGISTER.addMetric(tabletBytes); GaugeMetric<Long> replicaCnt = new GaugeMetric<Long>("memory", MetricUnit.NOUNIT, "The count of replicas") { @Override public Long getValue() { return GlobalStateMgr.getCurrentInvertedIndex().getReplicaCount(); } }; replicaCnt.addLabel(new MetricLabel("type", "replica_count")); STARROCKS_METRIC_REGISTER.addMetric(replicaCnt); GaugeMetric<Long> replicaBytes = new GaugeMetric<Long>("memory", MetricUnit.BYTES, "The bytes of replicas") { @Override public Long getValue() { return GlobalStateMgr.getCurrentInvertedIndex().getReplicaCount() * SizeEstimator.estimate(new Replica()); } }; replicaBytes.addLabel(new MetricLabel("type", "replica_bytes")); STARROCKS_METRIC_REGISTER.addMetric(replicaBytes); GaugeMetric<Long> txnCnt = new GaugeMetric<Long>("memory", MetricUnit.NOUNIT, "The count of txns") { @Override public Long getValue() { return (long) GlobalStateMgr.getCurrentGlobalTransactionMgr().getFinishedTransactionNum(); } }; txnCnt.addLabel(new MetricLabel("type", "txn_count")); STARROCKS_METRIC_REGISTER.addMetric(txnCnt); GaugeMetric<Long> txnBytes = new GaugeMetric<Long>("memory", MetricUnit.BYTES, "The bytes of txns") { @Override public Long getValue() { return GlobalStateMgr.getCurrentGlobalTransactionMgr().getFinishedTransactionNum() * SizeEstimator.estimate(new TransactionState()); } }; txnBytes.addLabel(new MetricLabel("type", "txn_bytes")); STARROCKS_METRIC_REGISTER.addMetric(txnBytes); GaugeMetric<Long> txnCallbackCnt = new GaugeMetric<Long>("memory", MetricUnit.NOUNIT, "The count of txn callbacks") { @Override public Long getValue() { return GlobalStateMgr.getCurrentGlobalTransactionMgr().getCallbackFactory().getCallBackCnt(); } }; txnCallbackCnt.addLabel(new MetricLabel("type", "txn_callback_count")); STARROCKS_METRIC_REGISTER.addMetric(txnCallbackCnt); GaugeMetric<Long> deleteJobCnt = new GaugeMetric<Long>("memory", MetricUnit.NOUNIT, "The count of delete jobs") { @Override public Long getValue() { return GlobalStateMgr.getCurrentState().getDeleteMgr().getDeleteJobCount(); } }; deleteJobCnt.addLabel(new MetricLabel("type", "delete_job_count")); STARROCKS_METRIC_REGISTER.addMetric(deleteJobCnt); GaugeMetric<Long> deleteJobInfoCnt = new GaugeMetric<Long>("memory", MetricUnit.NOUNIT, "The count of delete job info") { @Override public Long getValue() { return GlobalStateMgr.getCurrentState().getDeleteMgr().getDeleteInfoCount(); } }; deleteJobInfoCnt.addLabel(new MetricLabel("type", "delete_job_info_count")); STARROCKS_METRIC_REGISTER.addMetric(deleteJobInfoCnt); GaugeMetric<Long> taskCnt = new GaugeMetric<Long>("memory", MetricUnit.NOUNIT, "The count of tasks") { @Override public Long getValue() { return GlobalStateMgr.getCurrentState().getTaskManager().getTaskCount(); } }; taskCnt.addLabel(new 
MetricLabel("type", "task_count")); STARROCKS_METRIC_REGISTER.addMetric(taskCnt); GaugeMetric<Long> runningTaskRunCount = new GaugeMetric<Long>("memory", MetricUnit.NOUNIT, "The count of running task_run") { @Override public Long getValue() { return GlobalStateMgr.getCurrentState().getTaskManager().getTaskRunManager().getRunningTaskRunCount(); } }; runningTaskRunCount.addLabel(new MetricLabel("type", "running_task_run_count")); STARROCKS_METRIC_REGISTER.addMetric(runningTaskRunCount); GaugeMetric<Long> pendingTaskRunCount = new GaugeMetric<Long>("memory", MetricUnit.NOUNIT, "The count of pending task_run") { @Override public Long getValue() { return GlobalStateMgr.getCurrentState().getTaskManager().getTaskRunManager().getPendingTaskRunCount(); } }; pendingTaskRunCount.addLabel(new MetricLabel("type", "pending_task_run_count")); STARROCKS_METRIC_REGISTER.addMetric(pendingTaskRunCount); GaugeMetric<Long> historyTaskRunCount = new GaugeMetric<Long>("memory", MetricUnit.NOUNIT, "The count of history task_run") { @Override public Long getValue() { return GlobalStateMgr.getCurrentState().getTaskManager().getTaskRunManager().getHistoryTaskRunCount(); } }; historyTaskRunCount.addLabel(new MetricLabel("type", "history_task_run_count")); STARROCKS_METRIC_REGISTER.addMetric(historyTaskRunCount); GaugeMetric<Long> catalogCount = new GaugeMetric<Long>("memory", MetricUnit.NOUNIT, "The count of catalogs") { @Override public Long getValue() { return GlobalStateMgr.getCurrentState().getCatalogMgr().getCatalogCount(); } }; catalogCount.addLabel(new MetricLabel("type", "catalogs_count")); STARROCKS_METRIC_REGISTER.addMetric(catalogCount); GaugeMetric<Long> insertOverwriteJobCount = new GaugeMetric<Long>("memory", MetricUnit.NOUNIT, "The count of insert overwrite jobs") { @Override public Long getValue() { return GlobalStateMgr.getCurrentState().getInsertOverwriteJobMgr().getJobNum(); } }; insertOverwriteJobCount.addLabel(new MetricLabel("type", "insert_overwrite_jobs_count")); STARROCKS_METRIC_REGISTER.addMetric(insertOverwriteJobCount); GaugeMetric<Long> compactionStatsCount = new GaugeMetric<Long>("memory", MetricUnit.NOUNIT, "The count of compaction statistic") { @Override public Long getValue() { return GlobalStateMgr.getCurrentState().getCompactionMgr().getPartitionStatsCount(); } }; compactionStatsCount.addLabel(new MetricLabel("type", "compaction_stats_count")); STARROCKS_METRIC_REGISTER.addMetric(compactionStatsCount); GaugeMetric<Long> streamLoadTaskCount = new GaugeMetric<Long>("memory", MetricUnit.NOUNIT, "The count of stream load tasks") { @Override public Long getValue() { return GlobalStateMgr.getCurrentState().getStreamLoadMgr().getStreamLoadTaskCount(); } }; streamLoadTaskCount.addLabel(new MetricLabel("type", "stream_load_task_count")); STARROCKS_METRIC_REGISTER.addMetric(streamLoadTaskCount); GaugeMetric<Long> queryDetailCount = new GaugeMetric<Long>("memory", MetricUnit.NOUNIT, "The count of cached query details") { @Override public Long getValue() { return QueryDetailQueue.getTotalQueriesCount(); } }; queryDetailCount.addLabel(new MetricLabel("type", "query_detail_count")); STARROCKS_METRIC_REGISTER.addMetric(queryDetailCount); GaugeMetric<Long> queryProfileCount = new GaugeMetric<Long>("memory", MetricUnit.NOUNIT, "The count of cached query profile") { @Override public Long getValue() { return ProfileManager.getInstance().getQueryProfileCount(); } }; queryProfileCount.addLabel(new MetricLabel("type", "query_profile_count")); STARROCKS_METRIC_REGISTER.addMetric(queryProfileCount); 
GaugeMetric<Long> loadProfileCount = new GaugeMetric<Long>("memory", MetricUnit.NOUNIT, "The count of cached load profile") { @Override public Long getValue() { return ProfileManager.getInstance().getLoadProfileCount(); } }; loadProfileCount.addLabel(new MetricLabel("type", "load_profile_count")); STARROCKS_METRIC_REGISTER.addMetric(loadProfileCount); GaugeMetric<Long> queryCoordinatorCount = new GaugeMetric<Long>("memory", MetricUnit.NOUNIT, "The count of running query coordinator") { @Override public Long getValue() { return QeProcessorImpl.INSTANCE.getCoordinatorCount(); } }; queryCoordinatorCount.addLabel(new MetricLabel("type", "query_coordinator_count")); STARROCKS_METRIC_REGISTER.addMetric(queryCoordinatorCount); GaugeMetric<Long> agentTaskCount = new GaugeMetric<Long>("memory", MetricUnit.NOUNIT, "The count of agent task") { @Override public Long getValue() { return (long) AgentTaskQueue.getTaskNum(); } }; agentTaskCount.addLabel(new MetricLabel("type", "agent_task_count")); STARROCKS_METRIC_REGISTER.addMetric(agentTaskCount); } public static void generateBackendsTabletMetrics() { STARROCKS_METRIC_REGISTER.removeMetrics(TABLET_NUM); STARROCKS_METRIC_REGISTER.removeMetrics(TABLET_MAX_COMPACTION_SCORE); SystemInfoService infoService = GlobalStateMgr.getCurrentSystemInfo(); TabletInvertedIndex invertedIndex = GlobalStateMgr.getCurrentInvertedIndex(); for (Long beId : infoService.getBackendIds(false)) { Backend be = infoService.getBackend(beId); if (be == null) { continue; } GaugeMetric<Long> tabletNum = (GaugeMetric<Long>) new GaugeMetric<Long>(TABLET_NUM, MetricUnit.NOUNIT, "tablet number") { @Override public Long getValue() { if (!GlobalStateMgr.getCurrentState().isLeader()) { return 0L; } return invertedIndex.getTabletNumByBackendId(beId); } }; tabletNum.addLabel(new MetricLabel("backend", be.getHost() + ":" + be.getHeartbeatPort())); STARROCKS_METRIC_REGISTER.addMetric(tabletNum); GaugeMetric<Long> tabletMaxCompactionScore = (GaugeMetric<Long>) new GaugeMetric<Long>( TABLET_MAX_COMPACTION_SCORE, MetricUnit.NOUNIT, "tablet max compaction score") { @Override public Long getValue() { if (!GlobalStateMgr.getCurrentState().isLeader()) { return 0L; } return be.getTabletMaxCompactionScore(); } }; tabletMaxCompactionScore.addLabel(new MetricLabel("backend", be.getHost() + ":" + be.getHeartbeatPort())); STARROCKS_METRIC_REGISTER.addMetric(tabletMaxCompactionScore); } } public static void updateRoutineLoadProcessMetrics() { List<RoutineLoadJob> jobs = GlobalStateMgr.getCurrentState().getRoutineLoadMgr().getRoutineLoadJobByState( Sets.newHashSet(RoutineLoadJob.JobState.NEED_SCHEDULE, RoutineLoadJob.JobState.PAUSED, RoutineLoadJob.JobState.RUNNING)); List<RoutineLoadJob> kafkaJobs = jobs.stream() .filter(job -> (job instanceof KafkaRoutineLoadJob) && ((KafkaProgress) job.getProgress()).hasPartition()) .collect(Collectors.toList()); if (kafkaJobs.size() <= 0) { return; } List<PKafkaOffsetProxyRequest> requests = new ArrayList<>(); for (RoutineLoadJob job : kafkaJobs) { KafkaRoutineLoadJob kJob = (KafkaRoutineLoadJob) job; try { kJob.convertCustomProperties(false); } catch (DdlException e) { LOG.warn("convert custom properties failed", e); return; } PKafkaOffsetProxyRequest offsetProxyRequest = new PKafkaOffsetProxyRequest(); offsetProxyRequest.kafkaInfo = KafkaUtil.genPKafkaLoadInfo(kJob.getBrokerList(), kJob.getTopic(), ImmutableMap.copyOf(kJob.getConvertedCustomProperties())); offsetProxyRequest.partitionIds = new ArrayList<>( ((KafkaProgress) 
kJob.getProgress()).getPartitionIdToOffset().keySet()); requests.add(offsetProxyRequest); } List<PKafkaOffsetProxyResult> offsetProxyResults; try { offsetProxyResults = KafkaUtil.getBatchOffsets(requests); } catch (UserException e) { LOG.warn("get batch offsets failed", e); return; } List<GaugeMetricImpl<Long>> routineLoadLags = new ArrayList<>(); for (int i = 0; i < kafkaJobs.size(); i++) { KafkaRoutineLoadJob kJob = (KafkaRoutineLoadJob) kafkaJobs.get(i); ImmutableMap<Integer, Long> partitionIdToProgress = ((KafkaProgress) kJob.getProgress()).getPartitionIdToOffset(); List<Integer> partitionIds = offsetProxyResults.get(i).partitionIds; List<Long> beginningOffsets = offsetProxyResults.get(i).beginningOffsets; List<Long> latestOffsets = offsetProxyResults.get(i).latestOffsets; long maxLag = Long.MIN_VALUE; for (int j = 0; j < partitionIds.size(); j++) { int partitionId = partitionIds.get(j); if (!partitionIdToProgress.containsKey(partitionId)) { continue; } long progress = partitionIdToProgress.get(partitionId); if (progress == KafkaProgress.OFFSET_BEGINNING_VAL) { progress = beginningOffsets.get(j); } maxLag = Math.max(latestOffsets.get(j) - progress, maxLag); } if (maxLag >= Config.min_routine_load_lag_for_metrics) { GaugeMetricImpl<Long> metric = new GaugeMetricImpl<>("routine_load_max_lag_of_partition", MetricUnit.NOUNIT, "routine load kafka lag"); metric.addLabel(new MetricLabel("job_name", kJob.getName())); metric.setValue(maxLag); routineLoadLags.add(metric); } } GAUGE_ROUTINE_LOAD_LAGS = routineLoadLags; } public static synchronized String getMetric(MetricVisitor visitor, MetricsAction.RequestParams requestParams) { if (!isInit) { return ""; } updateMetrics(); JvmService jvmService = new JvmService(); JvmStats jvmStats = jvmService.stats(); visitor.visitJvm(jvmStats); for (Metric metric : STARROCKS_METRIC_REGISTER.getMetrics()) { visitor.visit(metric); } collectDatabaseMetrics(visitor); if (requestParams.isCollectTableMetrics()) { collectTableMetrics(visitor, requestParams.isMinifyTableMetrics()); } if (requestParams.isCollectMVMetrics()) { MaterializedViewMetricsRegistry.collectMaterializedViewMetrics(visitor, requestParams.isMinifyMVMetrics()); } SortedMap<String, Histogram> histograms = METRIC_REGISTER.getHistograms(); for (Map.Entry<String, Histogram> entry : histograms.entrySet()) { visitor.visitHistogram(entry.getKey(), entry.getValue()); } ResourceGroupMetricMgr.visitQueryLatency(); if (Config.enable_routine_load_lag_metrics) { collectRoutineLoadProcessMetrics(visitor); } visitor.getNodeInfo(); return visitor.build(); } private static void updateMetrics() { SYSTEM_METRICS.update(); } private static void collectDatabaseMetrics(MetricVisitor visitor) { GlobalStateMgr globalStateMgr = GlobalStateMgr.getCurrentState(); List<String> dbNames = globalStateMgr.getDbNames(); GaugeMetricImpl<Integer> databaseNum = new GaugeMetricImpl<>( "database_num", MetricUnit.OPERATIONS, "count of database"); int dbNum = 0; for (String dbName : dbNames) { Database db = GlobalStateMgr.getCurrentState().getDb(dbName); if (null == db) { continue; } dbNum++; GaugeMetricImpl<Integer> tableNum = new GaugeMetricImpl<>( "table_num", MetricUnit.OPERATIONS, "count of table"); tableNum.setValue(db.getTableNumber()); tableNum.addLabel(new MetricLabel("db_name", dbName)); visitor.visit(tableNum); } databaseNum.setValue(dbNum); visitor.visit(databaseNum); } private static void collectRoutineLoadProcessMetrics(MetricVisitor visitor) { for (GaugeMetricImpl<Long> metric : GAUGE_ROUTINE_LOAD_LAGS) { 
visitor.visit(metric); } } public static synchronized List<Metric> getMetricsByName(String name) { return STARROCKS_METRIC_REGISTER.getMetricsByName(name); } public static void addMetric(Metric<?> metric) { init(); STARROCKS_METRIC_REGISTER.addMetric(metric); } }
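One pattern worth calling out in the class above: every gauge is an anonymous GaugeMetric subclass whose getValue() is evaluated lazily at scrape time, and leader-only statistics return 0 on follower frontends so each cluster-wide value is reported by exactly one FE. A self-contained sketch of that idea (LazyGauge is a hypothetical stand-in, not a StarRocks class):

import java.util.ArrayList;
import java.util.List;
import java.util.function.Supplier;

class LazyGauge {
    final String name;
    final Supplier<Long> supplier; // evaluated on every scrape, not at registration time
    LazyGauge(String name, Supplier<Long> supplier) { this.name = name; this.supplier = supplier; }
    long read() { return supplier.get(); }
}

class GaugeDemo {
    static boolean isLeader = true;
    static long scheduledTablets = 42;

    public static void main(String[] args) {
        List<LazyGauge> registry = new ArrayList<>();
        // Same shape as scheduled_tablet_num above: followers report 0 so the
        // cluster-wide number is only emitted by the leader.
        registry.add(new LazyGauge("scheduled_tablet_num",
                () -> isLeader ? scheduledTablets : 0L));

        for (LazyGauge g : registry) System.out.println(g.name + "=" + g.read()); // 42
        isLeader = false;
        for (LazyGauge g : registry) System.out.println(g.name + "=" + g.read()); // 0
    }
}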
Actually, the JsonParser would detect this ...
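A quick way to see what this comment means (hypothetical snippet, not part of this record; assumes Jackson core is on the classpath): the streaming `JsonParser` itself raises an error when input ends mid-structure, which is why the explicit `buffer.nesting() + 1 != initNesting` guard in the old `fillTensor` was redundant.

```java
// Hypothetical demo: drain a deliberately truncated document and let the
// parser report the incomplete structure itself.
import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonParser;

public class IncompleteJsonDemo {
    public static void main(String[] args) throws Exception {
        JsonParser parser = new JsonFactory().createParser("{\"cells\": {\"a\": 1.0"); // no closing braces
        try {
            while (parser.nextToken() != null) {
                // just drain tokens; the parser tracks nesting internally
            }
        } catch (java.io.IOException e) {
            // Jackson signals the unexpected end-of-input on its own
            System.out.println("parser detected incomplete JSON: " + e.getMessage());
        }
    }
}
```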
static void fillTensor(TokenBuffer buffer, TensorFieldValue tensorFieldValue) { Tensor.Builder builder = Tensor.Builder.of(tensorFieldValue.getDataType().getTensorType()); expectOneOf(buffer.current(), JsonToken.START_OBJECT, JsonToken.START_ARRAY); int initNesting = buffer.nesting(); while ( ! buffer.isEmpty()) { Supplier<Token> lookahead = buffer.lookahead(); Token next = lookahead.get(); if (TENSOR_CELLS.equals(next.name) && ! primitiveContent(next.token, lookahead.get().token)) { buffer.next(); readTensorCells(buffer, builder); } else if (TENSOR_VALUES.equals(next.name) && builder.type().dimensions().stream().allMatch(Dimension::isIndexed)) { buffer.next(); readTensorValues(buffer, builder); } else if (TENSOR_BLOCKS.equals(next.name)) { buffer.next(); readTensorBlocks(buffer, builder); } else if (TENSOR_TYPE.equals(next.name) && next.token == JsonToken.VALUE_STRING) { buffer.next(); } else if (buffer.nesting() == initNesting && JsonToken.END_OBJECT == next.token) { buffer.next(); break; } else { readDirectTensorValue(buffer, builder); break; } } if (buffer.nesting() + 1 != initNesting) throw new IllegalArgumentException("incomplete JSON structure for " + tensorFieldValue); expectOneOf(buffer.current(), JsonToken.END_OBJECT, JsonToken.END_ARRAY); tensorFieldValue.assign(builder.build()); }
throw new IllegalArgumentException("incomplete JSON structure for " + tensorFieldValue);
static void fillTensor(TokenBuffer buffer, TensorFieldValue tensorFieldValue) { Tensor.Builder builder = Tensor.Builder.of(tensorFieldValue.getDataType().getTensorType()); expectOneOf(buffer.current(), JsonToken.START_OBJECT, JsonToken.START_ARRAY); int initNesting = buffer.nesting(); while (true) { Supplier<Token> lookahead = buffer.lookahead(); Token next = lookahead.get(); if (TENSOR_CELLS.equals(next.name) && ! primitiveContent(next.token, lookahead.get().token)) { buffer.next(); readTensorCells(buffer, builder); } else if (TENSOR_VALUES.equals(next.name) && builder.type().dimensions().stream().allMatch(Dimension::isIndexed)) { buffer.next(); readTensorValues(buffer, builder); } else if (TENSOR_BLOCKS.equals(next.name)) { buffer.next(); readTensorBlocks(buffer, builder); } else if (TENSOR_TYPE.equals(next.name) && next.token == JsonToken.VALUE_STRING) { buffer.next(); } else if (buffer.nesting() == initNesting && JsonToken.END_OBJECT == next.token) { buffer.next(); break; } else { readDirectTensorValue(buffer, builder); break; } } expectOneOf(buffer.current(), JsonToken.END_OBJECT, JsonToken.END_ARRAY); tensorFieldValue.assign(builder.build()); }
class TensorReader { public static final String TENSOR_TYPE = "type"; public static final String TENSOR_ADDRESS = "address"; public static final String TENSOR_CELLS = "cells"; public static final String TENSOR_VALUES = "values"; public static final String TENSOR_BLOCKS = "blocks"; public static final String TENSOR_VALUE = "value"; static boolean primitiveContent(JsonToken current, JsonToken next) { if (current.isScalarValue()) return true; if (current == JsonToken.START_ARRAY) { if (next == JsonToken.END_ARRAY) return false; if (next.isScalarValue()) return true; } return false; } static void readTensorCells(TokenBuffer buffer, Tensor.Builder builder) { if (buffer.current() == JsonToken.START_ARRAY) { int initNesting = buffer.nesting(); for (buffer.next(); buffer.nesting() >= initNesting; buffer.next()) readTensorCell(buffer, builder); } else if (buffer.current() == JsonToken.START_OBJECT) { int initNesting = buffer.nesting(); for (buffer.next(); buffer.nesting() >= initNesting; buffer.next()) builder.cell(asAddress(buffer.currentName(), builder.type()), readDouble(buffer)); } else { throw new IllegalArgumentException("Expected 'cells' to contain an array or an object, but got " + buffer.current()); } expectCompositeEnd(buffer.current()); } private static void readTensorCell(TokenBuffer buffer, Tensor.Builder builder) { expectObjectStart(buffer.current()); TensorAddress address = null; Double value = null; int initNesting = buffer.nesting(); for (buffer.next(); buffer.nesting() >= initNesting; buffer.next()) { String currentName = buffer.currentName(); if (TensorReader.TENSOR_ADDRESS.equals(currentName)) { address = readAddress(buffer, builder.type()); } else if (TensorReader.TENSOR_VALUE.equals(currentName)) { value = readDouble(buffer); } } expectObjectEnd(buffer.current()); if (address == null) throw new IllegalArgumentException("Expected an object in a tensor 'cells' array to contain an 'address' field"); if (value == null) throw new IllegalArgumentException("Expected an object in a tensor 'cells' array to contain a 'value' field"); builder.cell(address, value); } private static void readTensorValues(TokenBuffer buffer, Tensor.Builder builder) { if ( ! (builder instanceof IndexedTensor.BoundBuilder indexedBuilder)) throw new IllegalArgumentException("The 'values' field can only be used with dense tensors. " + "Use 'cells' or 'blocks' instead"); if (buffer.current() == JsonToken.VALUE_STRING) { double[] decoded = decodeHexString(buffer.currentText(), builder.type().valueType()); if (decoded.length == 0) throw new IllegalArgumentException("The 'values' string does not contain any values"); for (int i = 0; i < decoded.length; i++) { indexedBuilder.cellByDirectIndex(i, decoded[i]); } return; } int index = 0; int initNesting = buffer.nesting(); for (buffer.next(); buffer.nesting() >= initNesting; buffer.next()) { if (buffer.current() == JsonToken.START_ARRAY || buffer.current() == JsonToken.END_ARRAY) continue; indexedBuilder.cellByDirectIndex(index++, readDouble(buffer)); } if (index == 0) throw new IllegalArgumentException("The 'values' array does not contain any values"); expectCompositeEnd(buffer.current()); } static void readTensorBlocks(TokenBuffer buffer, Tensor.Builder builder) { if ( ! (builder instanceof MixedTensor.BoundBuilder mixedBuilder)) throw new IllegalArgumentException("The 'blocks' field can only be used with mixed tensors with bound dimensions. 
" + "Use 'cells' or 'values' instead"); if (buffer.current() == JsonToken.START_ARRAY) { int initNesting = buffer.nesting(); for (buffer.next(); buffer.nesting() >= initNesting; buffer.next()) readTensorBlock(buffer, mixedBuilder); } else if (buffer.current() == JsonToken.START_OBJECT) { int initNesting = buffer.nesting(); for (buffer.next(); buffer.nesting() >= initNesting; buffer.next()) { TensorAddress mappedAddress = asAddress(buffer.currentName(), builder.type().mappedSubtype()); mixedBuilder.block(mappedAddress, readValues(buffer, (int) mixedBuilder.denseSubspaceSize(), mappedAddress, mixedBuilder.type())); } } else { throw new IllegalArgumentException("Expected 'blocks' to contain an array or an object, but got " + buffer.current()); } expectCompositeEnd(buffer.current()); } private static void readTensorBlock(TokenBuffer buffer, MixedTensor.BoundBuilder mixedBuilder) { expectObjectStart(buffer.current()); TensorAddress address = null; double[] values = null; int initNesting = buffer.nesting(); for (buffer.next(); buffer.nesting() >= initNesting; buffer.next()) { String currentName = buffer.currentName(); if (TensorReader.TENSOR_ADDRESS.equals(currentName)) address = readAddress(buffer, mixedBuilder.type().mappedSubtype()); else if (TensorReader.TENSOR_VALUES.equals(currentName)) values = readValues(buffer, (int)mixedBuilder.denseSubspaceSize(), address, mixedBuilder.type()); } expectObjectEnd(buffer.current()); if (address == null) throw new IllegalArgumentException("Expected a 'blocks' array object to contain an object 'address'"); if (values == null) throw new IllegalArgumentException("Expected a 'blocks' array object to contain an array 'values'"); mixedBuilder.block(address, values); } /** Reads a tensor value directly at the root, where the format is decided by the tensor type. */ private static void readDirectTensorValue(TokenBuffer buffer, Tensor.Builder builder) { boolean hasIndexed = builder.type().dimensions().stream().anyMatch(TensorType.Dimension::isIndexed); boolean hasMapped = builder.type().dimensions().stream().anyMatch(TensorType.Dimension::isMapped); if (isArrayOfObjects(buffer)) readTensorCells(buffer, builder); else if ( ! 
hasMapped) readTensorValues(buffer, builder); else if (hasMapped && hasIndexed) readTensorBlocks(buffer, builder); else readTensorCells(buffer, builder); } private static boolean isArrayOfObjects(TokenBuffer buffer) { if (buffer.current() != JsonToken.START_ARRAY) return false; Supplier<Token> lookahead = buffer.lookahead(); Token next; while ((next = lookahead.get()).token == JsonToken.START_ARRAY); return next.token == JsonToken.START_OBJECT; } private static TensorAddress readAddress(TokenBuffer buffer, TensorType type) { expectObjectStart(buffer.current()); TensorAddress.Builder builder = new TensorAddress.Builder(type); int initNesting = buffer.nesting(); for (buffer.next(); buffer.nesting() >= initNesting; buffer.next()) builder.add(buffer.currentName(), buffer.currentText()); expectObjectEnd(buffer.current()); return builder.build(); } /** * Reads values for a tensor subspace block * * @param buffer the buffer containing the values * @param size the expected number of values * @param address the address for the block for error reporting, or null if not known * @param type the type of the tensor we are reading * @return the values read */ private static double[] readValues(TokenBuffer buffer, int size, TensorAddress address, TensorType type) { int index = 0; double[] values = new double[size]; if (buffer.current() == JsonToken.VALUE_STRING) { values = decodeHexString(buffer.currentText(), type.valueType()); index = values.length; } else { expectArrayStart(buffer.current()); int initNesting = buffer.nesting(); for (buffer.next(); buffer.nesting() >= initNesting; buffer.next()) { if (buffer.current() == JsonToken.START_ARRAY || buffer.current() == JsonToken.END_ARRAY) continue; values[index++] = readDouble(buffer); } expectCompositeEnd(buffer.current()); } if (index != size) throw new IllegalArgumentException((address != null ? "At " + address.toString(type) + ": " : "") + "Expected " + size + " values, but got " + index); return values; } private static double readDouble(TokenBuffer buffer) { try { if (buffer.current() == JsonToken.VALUE_STRING) { return decodeNumberString(buffer.currentText()); } return Double.parseDouble(buffer.currentText()); } catch (NumberFormatException e) { throw new IllegalArgumentException("Expected a number but got '" + buffer.currentText() + "'"); } } private static TensorAddress asAddress(String label, TensorType type) { if (type.dimensions().size() != 1) throw new IllegalArgumentException("Expected a tensor with a single dimension but got '" + type + "'"); return new TensorAddress.Builder(type).add(type.dimensions().get(0).name(), label).build(); } }
class TensorReader { public static final String TENSOR_TYPE = "type"; public static final String TENSOR_ADDRESS = "address"; public static final String TENSOR_CELLS = "cells"; public static final String TENSOR_VALUES = "values"; public static final String TENSOR_BLOCKS = "blocks"; public static final String TENSOR_VALUE = "value"; static boolean primitiveContent(JsonToken current, JsonToken next) { if (current.isScalarValue()) return true; if (current == JsonToken.START_ARRAY) { if (next == JsonToken.END_ARRAY) return false; if (next.isScalarValue()) return true; } return false; } static void readTensorCells(TokenBuffer buffer, Tensor.Builder builder) { if (buffer.current() == JsonToken.START_ARRAY) { int initNesting = buffer.nesting(); for (buffer.next(); buffer.nesting() >= initNesting; buffer.next()) readTensorCell(buffer, builder); } else if (buffer.current() == JsonToken.START_OBJECT) { int initNesting = buffer.nesting(); for (buffer.next(); buffer.nesting() >= initNesting; buffer.next()) builder.cell(asAddress(buffer.currentName(), builder.type()), readDouble(buffer)); } else { throw new IllegalArgumentException("Expected 'cells' to contain an array or an object, but got " + buffer.current()); } expectCompositeEnd(buffer.current()); } private static void readTensorCell(TokenBuffer buffer, Tensor.Builder builder) { expectObjectStart(buffer.current()); TensorAddress address = null; Double value = null; int initNesting = buffer.nesting(); for (buffer.next(); buffer.nesting() >= initNesting; buffer.next()) { String currentName = buffer.currentName(); if (TensorReader.TENSOR_ADDRESS.equals(currentName)) { address = readAddress(buffer, builder.type()); } else if (TensorReader.TENSOR_VALUE.equals(currentName)) { value = readDouble(buffer); } } expectObjectEnd(buffer.current()); if (address == null) throw new IllegalArgumentException("Expected an object in a tensor 'cells' array to contain an 'address' field"); if (value == null) throw new IllegalArgumentException("Expected an object in a tensor 'cells' array to contain a 'value' field"); builder.cell(address, value); } private static void readTensorValues(TokenBuffer buffer, Tensor.Builder builder) { if ( ! (builder instanceof IndexedTensor.BoundBuilder indexedBuilder)) throw new IllegalArgumentException("The 'values' field can only be used with dense tensors. " + "Use 'cells' or 'blocks' instead"); if (buffer.current() == JsonToken.VALUE_STRING) { double[] decoded = decodeHexString(buffer.currentText(), builder.type().valueType()); if (decoded.length == 0) throw new IllegalArgumentException("The 'values' string does not contain any values"); for (int i = 0; i < decoded.length; i++) { indexedBuilder.cellByDirectIndex(i, decoded[i]); } return; } int index = 0; int initNesting = buffer.nesting(); for (buffer.next(); buffer.nesting() >= initNesting; buffer.next()) { if (buffer.current() == JsonToken.START_ARRAY || buffer.current() == JsonToken.END_ARRAY) continue; indexedBuilder.cellByDirectIndex(index++, readDouble(buffer)); } if (index == 0) throw new IllegalArgumentException("The 'values' array does not contain any values"); expectCompositeEnd(buffer.current()); } static void readTensorBlocks(TokenBuffer buffer, Tensor.Builder builder) { if ( ! (builder instanceof MixedTensor.BoundBuilder mixedBuilder)) throw new IllegalArgumentException("The 'blocks' field can only be used with mixed tensors with bound dimensions. 
" + "Use 'cells' or 'values' instead"); if (buffer.current() == JsonToken.START_ARRAY) { int initNesting = buffer.nesting(); for (buffer.next(); buffer.nesting() >= initNesting; buffer.next()) readTensorBlock(buffer, mixedBuilder); } else if (buffer.current() == JsonToken.START_OBJECT) { int initNesting = buffer.nesting(); for (buffer.next(); buffer.nesting() >= initNesting; buffer.next()) { TensorAddress mappedAddress = asAddress(buffer.currentName(), builder.type().mappedSubtype()); mixedBuilder.block(mappedAddress, readValues(buffer, (int) mixedBuilder.denseSubspaceSize(), mappedAddress, mixedBuilder.type())); } } else { throw new IllegalArgumentException("Expected 'blocks' to contain an array or an object, but got " + buffer.current()); } expectCompositeEnd(buffer.current()); } private static void readTensorBlock(TokenBuffer buffer, MixedTensor.BoundBuilder mixedBuilder) { expectObjectStart(buffer.current()); TensorAddress address = null; double[] values = null; int initNesting = buffer.nesting(); for (buffer.next(); buffer.nesting() >= initNesting; buffer.next()) { String currentName = buffer.currentName(); if (TensorReader.TENSOR_ADDRESS.equals(currentName)) address = readAddress(buffer, mixedBuilder.type().mappedSubtype()); else if (TensorReader.TENSOR_VALUES.equals(currentName)) values = readValues(buffer, (int)mixedBuilder.denseSubspaceSize(), address, mixedBuilder.type()); } expectObjectEnd(buffer.current()); if (address == null) throw new IllegalArgumentException("Expected a 'blocks' array object to contain an object 'address'"); if (values == null) throw new IllegalArgumentException("Expected a 'blocks' array object to contain an array 'values'"); mixedBuilder.block(address, values); } /** Reads a tensor value directly at the root, where the format is decided by the tensor type. */ private static void readDirectTensorValue(TokenBuffer buffer, Tensor.Builder builder) { boolean hasIndexed = builder.type().dimensions().stream().anyMatch(TensorType.Dimension::isIndexed); boolean hasMapped = builder.type().dimensions().stream().anyMatch(TensorType.Dimension::isMapped); if (isArrayOfObjects(buffer)) readTensorCells(buffer, builder); else if ( ! 
hasMapped) readTensorValues(buffer, builder); else if (hasMapped && hasIndexed) readTensorBlocks(buffer, builder); else readTensorCells(buffer, builder); } private static boolean isArrayOfObjects(TokenBuffer buffer) { if (buffer.current() != JsonToken.START_ARRAY) return false; Supplier<Token> lookahead = buffer.lookahead(); Token next; while ((next = lookahead.get()).token == JsonToken.START_ARRAY) { } return next.token == JsonToken.START_OBJECT; } private static TensorAddress readAddress(TokenBuffer buffer, TensorType type) { expectObjectStart(buffer.current()); TensorAddress.Builder builder = new TensorAddress.Builder(type); int initNesting = buffer.nesting(); for (buffer.next(); buffer.nesting() >= initNesting; buffer.next()) builder.add(buffer.currentName(), buffer.currentText()); expectObjectEnd(buffer.current()); return builder.build(); } /** * Reads values for a tensor subspace block * * @param buffer the buffer containing the values * @param size the expected number of values * @param address the address for the block for error reporting, or null if not known * @param type the type of the tensor we are reading * @return the values read */ private static double[] readValues(TokenBuffer buffer, int size, TensorAddress address, TensorType type) { int index = 0; double[] values = new double[size]; if (buffer.current() == JsonToken.VALUE_STRING) { values = decodeHexString(buffer.currentText(), type.valueType()); index = values.length; } else { expectArrayStart(buffer.current()); int initNesting = buffer.nesting(); for (buffer.next(); buffer.nesting() >= initNesting; buffer.next()) { if (buffer.current() == JsonToken.START_ARRAY || buffer.current() == JsonToken.END_ARRAY) continue; values[index++] = readDouble(buffer); } expectCompositeEnd(buffer.current()); } if (index != size) throw new IllegalArgumentException((address != null ? "At " + address.toString(type) + ": " : "") + "Expected " + size + " values, but got " + index); return values; } private static double readDouble(TokenBuffer buffer) { try { if (buffer.current() == JsonToken.VALUE_STRING) { return decodeNumberString(buffer.currentText()); } return Double.parseDouble(buffer.currentText()); } catch (NumberFormatException e) { throw new IllegalArgumentException("Expected a number but got '" + buffer.currentText() + "'"); } } private static TensorAddress asAddress(String label, TensorType type) { if (type.dimensions().size() != 1) throw new IllegalArgumentException("Expected a tensor with a single dimension but got '" + type + "'"); return new TensorAddress.Builder(type).add(type.dimensions().get(0).name(), label).build(); } }
Fixed (introduced a separate `roCellContaining` to handle the read-only case)
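A self-contained sketch of the shape of that fix (all types here are stand-ins for the real semtype classes, not the actual API; Java 16+ for records): the default wrapper keeps limited mutability while the new read-only variant delegates with `CELL_MUT_NONE`, mirroring the `cellContaining`/`roCellContaining` pair in the record below.

```java
// Stand-in model only: real code uses Env/SemType/CellSemType instead of these records.
public class CellContainingSketch {
    enum CellMutability { CELL_MUT_NONE, CELL_MUT_LIMITED, CELL_MUT_UNLIMITED }

    record Cell(String ty, CellMutability mut) { }

    static Cell cellContaining(String ty) {
        return cellContaining(ty, CellMutability.CELL_MUT_LIMITED); // default stays limited
    }

    static Cell roCellContaining(String ty) {
        return cellContaining(ty, CellMutability.CELL_MUT_NONE);    // readonly case
    }

    static Cell cellContaining(String ty, CellMutability mut) {
        return new Cell(ty, mut);
    }

    public static void main(String[] args) {
        System.out.println(cellContaining("int"));   // Cell[ty=int, mut=CELL_MUT_LIMITED]
        System.out.println(roCellContaining("int")); // Cell[ty=int, mut=CELL_MUT_NONE]
    }
}
```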
public static CellSemType cellContaining(Env env, SemType ty, CellAtomicType.CellMutability mut) { assert !(ty instanceof CellSemType); CellAtomicType atomicCell = CellAtomicType.from(ty, mut); TypeAtom atom = env.cellAtom(atomicCell); BddNode bdd = bddAtom(atom); ComplexSemType complexSemType = PredefinedType.basicSubtype(BasicTypeCode.BT_CELL, bdd); return new CellSemType(complexSemType.subtypeDataList); }
TypeAtom atom = env.cellAtom(atomicCell);
public static CellSemType cellContaining(Env env, SemType ty, CellAtomicType.CellMutability mut) { assert Core.isNever(ty) || !Core.isSubtypeSimple(ty, PredefinedType.CELL); CellAtomicType atomicCell = CellAtomicType.from(ty, mut); TypeAtom atom = env.cellAtom(atomicCell); BddNode bdd = bddAtom(atom); ComplexSemType complexSemType = PredefinedType.basicSubtype(BasicTypeCode.BT_CELL, bdd); return CellSemType.from(complexSemType.subtypeDataList); }
class CellSubtype { public static CellSemType cellContaining(Env env, SemType ty) { return cellContaining(env, ty, CELL_MUT_NONE); } }
class CellSubtype { public static CellSemType cellContaining(Env env, SemType ty) { return cellContaining(env, ty, CELL_MUT_LIMITED); } public static CellSemType roCellContaining(Env env, SemType ty) { return cellContaining(env, ty, CELL_MUT_NONE); } }
According to this, order doesn't matter. https://stackoverflow.com/a/37986481/4220757
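The linked answer's claim can be checked with a throwaway Reactor snippet (hypothetical, not from this record; it uses `Schedulers.boundedElastic()`, the non-deprecated counterpart of the `elastic()` scheduler seen above): `subscribeOn` decides where the subscription starts for the whole chain, so its position relative to the other operators does not change which thread runs the work.

```java
import reactor.core.publisher.Mono;
import reactor.core.scheduler.Schedulers;

public class SubscribeOnOrderDemo {
    public static void main(String[] args) throws InterruptedException {
        // Both pipelines run the runnable on a boundedElastic thread; moving
        // subscribeOn before or after doFinally makes no difference.
        Mono.fromRunnable(() -> System.out.println("A on " + Thread.currentThread().getName()))
            .subscribeOn(Schedulers.boundedElastic())
            .doFinally(signal -> System.out.println("A done"))
            .subscribe();

        Mono.fromRunnable(() -> System.out.println("B on " + Thread.currentThread().getName()))
            .doFinally(signal -> System.out.println("B done"))
            .subscribeOn(Schedulers.boundedElastic())
            .subscribe();

        Thread.sleep(500); // give the elastic threads time to print
    }
}
```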
private static Mono<Void> generateEvents(AtomicBoolean isRunning) { final Logger logger = LoggerFactory.getLogger("Producer"); final Scheduler scheduler = Schedulers.elastic(); final Duration operationTimeout = Duration.ofSeconds(5); final String[] machineIds = new String[]{"2A", "9B", "6C"}; final Random random = new Random(); final EventHubProducerAsyncClient client = new EventHubClientBuilder() .connectionString(EH_CONNECTION_STRING) .buildAsyncProducerClient(); return Mono.<Void>fromRunnable(() -> { while (isRunning.get()) { int milliseconds = random.nextInt(1000); try { TimeUnit.MILLISECONDS.sleep(milliseconds); } catch (InterruptedException ignored) { } final String machineId = machineIds[random.nextInt(machineIds.length)]; final int temperature = Math.abs(random.nextInt() % 101); logger.info("[{}] Temperature: {}C", machineId, temperature); final EventData event = new EventData(String.valueOf(temperature)); final CreateBatchOptions batchOptions = new CreateBatchOptions().setPartitionKey(machineId); client.createBatch(batchOptions).flatMap(batch -> { batch.tryAdd(event); return client.send(batch); }).block(operationTimeout); } }).subscribeOn(scheduler) .doFinally(signal -> { logger.info("Disposing of producer."); client.close(); }); }
}).subscribeOn(scheduler)
private static Mono<Void> generateEvents(AtomicBoolean isRunning) { final Logger logger = LoggerFactory.getLogger("Producer"); final Scheduler scheduler = Schedulers.elastic(); final Duration operationTimeout = Duration.ofSeconds(5); final String[] machineIds = new String[]{"2A", "9B", "6C"}; final Random random = new Random(); final EventHubProducerAsyncClient client = new EventHubClientBuilder() .connectionString(EH_CONNECTION_STRING) .buildAsyncProducerClient(); return Mono.<Void>fromRunnable(() -> { while (isRunning.get()) { int milliseconds = random.nextInt(1000); try { TimeUnit.MILLISECONDS.sleep(milliseconds); } catch (InterruptedException ignored) { } final String machineId = machineIds[random.nextInt(machineIds.length)]; final int temperature = Math.abs(random.nextInt() % 101); logger.info("[{}] Temperature: {}C", machineId, temperature); final EventData event = new EventData(String.valueOf(temperature)); final CreateBatchOptions batchOptions = new CreateBatchOptions().setPartitionKey(machineId); client.createBatch(batchOptions).flatMap(batch -> { batch.tryAdd(event); return client.send(batch); }).block(operationTimeout); } }).subscribeOn(scheduler) .doFinally(signal -> { logger.info("Disposing of producer."); client.close(); }); }
class EventProcessorClientAggregateEventsSample { private static final Duration REPORTING_INTERVAL = Duration.ofSeconds(5); private static final String EH_CONNECTION_STRING = "Endpoint={endpoint};SharedAccessKeyName={sharedAccessKeyName};" + "SharedAccessKey={sharedAccessKey};EntityPath={eventHubName}"; /** * Main method to demonstrate starting and stopping a {@link EventProcessorClient}. * * @param args The input arguments to this executable. * @throws Exception If there are any errors while running the {@link EventProcessorClient}. */ public static void main(String[] args) throws Exception { final MachineEventsProcessor aggregator = new MachineEventsProcessor(REPORTING_INTERVAL); final EventProcessorClient client = new EventProcessorClientBuilder() .consumerGroup(EventHubClientBuilder.DEFAULT_CONSUMER_GROUP_NAME) .connectionString(EH_CONNECTION_STRING) .processPartitionInitialization(context -> aggregator.onInitialize(context)) .processPartitionClose(context -> aggregator.onClose(context)) .processEvent(event -> aggregator.onEvent(event)) .processError(error -> aggregator.onError(error)) .checkpointStore(new InMemoryCheckpointStore()) .buildEventProcessorClient(); System.out.println("Starting event processor"); final AtomicBoolean isRunning = new AtomicBoolean(true); client.start(); generateEvents(isRunning).subscribe(); System.out.println("Sleeping..."); Thread.sleep(TimeUnit.SECONDS.toMillis(30)); isRunning.set(false); System.out.println("Stopping event processor"); client.stop(); System.out.println("Exiting process"); } /** * Helper method that generates events for machines "2A", "9B", and "6C" and sends them to the service. */ }
class EventProcessorClientAggregateEventsSample { private static final Duration REPORTING_INTERVAL = Duration.ofSeconds(5); private static final String EH_CONNECTION_STRING = "Endpoint={endpoint};SharedAccessKeyName={sharedAccessKeyName};" + "SharedAccessKey={sharedAccessKey};EntityPath={eventHubName}"; /** * Main method to demonstrate starting and stopping a {@link EventProcessorClient}. * * @param args The input arguments to this executable. * @throws Exception If there are any errors while running the {@link EventProcessorClient}. */ public static void main(String[] args) throws Exception { final MachineEventsProcessor aggregator = new MachineEventsProcessor(REPORTING_INTERVAL); final EventProcessorClient client = new EventProcessorClientBuilder() .consumerGroup(EventHubClientBuilder.DEFAULT_CONSUMER_GROUP_NAME) .connectionString(EH_CONNECTION_STRING) .processPartitionInitialization(context -> aggregator.onInitialize(context)) .processPartitionClose(context -> aggregator.onClose(context)) .processEvent(event -> aggregator.onEvent(event)) .processError(error -> aggregator.onError(error)) .checkpointStore(new InMemoryCheckpointStore()) .buildEventProcessorClient(); System.out.println("Starting event processor"); final AtomicBoolean isRunning = new AtomicBoolean(true); client.start(); generateEvents(isRunning).subscribe(); System.out.println("Sleeping..."); Thread.sleep(TimeUnit.SECONDS.toMillis(30)); isRunning.set(false); System.out.println("Stopping event processor"); client.stop(); System.out.println("Exiting process"); } /** * Helper method that generates events for machines "2A", "9B", and "6C" and sends them to the service. */ }
`ScheduledUnit` will just use the `ExecutionVertexID` of the given `Execution`, so the location preference in this `execution` param will not take effect. The location preference takes effect through the following `slotProfile` param (e.g. `slotProfileForLocation(loc1)`).
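A minimal stand-in model of that split (everything here is hypothetical plain Java, not Flink's real API): the unit carries identity only, and the provider consults the profile's preferred locations, which is why `slotProfileForLocation(loc1)` is what actually steers placement in the test below.

```java
import java.util.List;

public class SlotProfileSketch {
    record ScheduledUnit(String executionVertexId) { }      // identity only
    record SlotProfile(List<String> preferredLocations) { } // locality lives here

    static String allocate(ScheduledUnit unit, SlotProfile profile) {
        // a real provider matches preferred locations against free slots;
        // the unit itself contributes no locality information
        return profile.preferredLocations().isEmpty()
                ? "any-slot-for-" + unit.executionVertexId()
                : profile.preferredLocations().get(0);
    }

    public static void main(String[] args) {
        ScheduledUnit unit = new ScheduledUnit("jid1_0");
        System.out.println(allocate(unit, new SlotProfile(List.of("loc1")))); // loc1
        System.out.println(allocate(unit, new SlotProfile(List.of())));       // any-slot-for-jid1_0
    }
}
```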
public void testGetsNonLocalFromSharingGroupFirst() throws Exception { JobVertexID jid1 = new JobVertexID(); JobVertexID jid2 = new JobVertexID(); JobVertexID jid3 = new JobVertexID(); TaskManagerLocation loc1 = testingSlotProvider.addTaskManager(1); TaskManagerLocation loc2 = testingSlotProvider.addTaskManager(1); assertEquals(2, testingSlotProvider.getNumberOfAvailableSlots()); SlotSharingGroup sharingGroup = new SlotSharingGroup(); CoLocationGroup ccg = new CoLocationGroup(); CoLocationConstraint cc1 = new CoLocationConstraint(ccg); CoLocationConstraint cc2 = new CoLocationConstraint(ccg); LogicalSlot s1 = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid1, 0, 2, sharingGroup), sharingGroup.getSlotSharingGroupId()), slotProfileForLocation(loc1), TestingUtils.infiniteTime()).get(); LogicalSlot s2 = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid1, 1, 2, sharingGroup), sharingGroup.getSlotSharingGroupId()), slotProfileForLocation(loc2), TestingUtils.infiniteTime()).get(); LogicalSlot s3 = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid2, 0, 2, sharingGroup), sharingGroup.getSlotSharingGroupId(), cc1), slotProfileForLocation(loc1), TestingUtils.infiniteTime()).get(); LogicalSlot s4 = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid2, 1, 2, sharingGroup), sharingGroup.getSlotSharingGroupId(), cc2), slotProfileForLocation(loc1), TestingUtils.infiniteTime()).get(); LogicalSlot s5 = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid3, 0, 2, sharingGroup), sharingGroup.getSlotSharingGroupId(), cc1), slotProfileForLocation(loc2), TestingUtils.infiniteTime()).get(); LogicalSlot s6 = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid3, 1, 2, sharingGroup), sharingGroup.getSlotSharingGroupId(), cc2), slotProfileForLocation(loc1), TestingUtils.infiniteTime()).get(); assertEquals(s1.getTaskManagerLocation(), s3.getTaskManagerLocation()); assertEquals(s2.getTaskManagerLocation(), s4.getTaskManagerLocation()); assertEquals(s1.getTaskManagerLocation(), s5.getTaskManagerLocation()); assertEquals(s2.getTaskManagerLocation(), s6.getTaskManagerLocation()); assertEquals(0, testingSlotProvider.getNumberOfAvailableSlots()); assertEquals(5, testingSlotProvider.getNumberOfLocalizedAssignments()); assertTrue(1 == testingSlotProvider.getNumberOfNonLocalizedAssignments() || 1 == testingSlotProvider.getNumberOfHostLocalizedAssignments()); assertEquals(0, testingSlotProvider.getNumberOfUnconstrainedAssignments()); s1.releaseSlot(); s2.releaseSlot(); s3.releaseSlot(); s4.releaseSlot(); s5.releaseSlot(); s6.releaseSlot(); assertEquals(2, testingSlotProvider.getNumberOfAvailableSlots()); }
new ScheduledUnit(getExecution(jid3, 1, 2, sharingGroup), sharingGroup.getSlotSharingGroupId(), cc2), slotProfileForLocation(loc1), TestingUtils.infiniteTime()).get();
public void testGetsNonLocalFromSharingGroupFirst() throws Exception { JobVertexID jid1 = new JobVertexID(); JobVertexID jid2 = new JobVertexID(); JobVertexID jid3 = new JobVertexID(); TaskManagerLocation loc1 = testingSlotProvider.addTaskManager(1); TaskManagerLocation loc2 = testingSlotProvider.addTaskManager(1); assertEquals(2, testingSlotProvider.getNumberOfAvailableSlots()); SlotSharingGroup sharingGroup = new SlotSharingGroup(); CoLocationGroup ccg = new CoLocationGroup(); CoLocationConstraint cc1 = new CoLocationConstraint(ccg); CoLocationConstraint cc2 = new CoLocationConstraint(ccg); LogicalSlot s1 = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid1, 0, 2, sharingGroup), sharingGroup.getSlotSharingGroupId()), slotProfileForLocation(loc1), TestingUtils.infiniteTime()).get(); LogicalSlot s2 = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid1, 1, 2, sharingGroup), sharingGroup.getSlotSharingGroupId()), slotProfileForLocation(loc2), TestingUtils.infiniteTime()).get(); LogicalSlot s3 = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid2, 0, 2, sharingGroup), sharingGroup.getSlotSharingGroupId(), cc1), slotProfileForLocation(loc1), TestingUtils.infiniteTime()).get(); LogicalSlot s4 = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid2, 1, 2, sharingGroup), sharingGroup.getSlotSharingGroupId(), cc2), slotProfileForLocation(loc1), TestingUtils.infiniteTime()).get(); LogicalSlot s5 = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid3, 0, 2, sharingGroup), sharingGroup.getSlotSharingGroupId(), cc1), slotProfileForLocation(loc2), TestingUtils.infiniteTime()).get(); LogicalSlot s6 = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid3, 1, 2, sharingGroup), sharingGroup.getSlotSharingGroupId(), cc2), slotProfileForLocation(loc1), TestingUtils.infiniteTime()).get(); assertEquals(s1.getTaskManagerLocation(), s3.getTaskManagerLocation()); assertEquals(s2.getTaskManagerLocation(), s4.getTaskManagerLocation()); assertEquals(s1.getTaskManagerLocation(), s5.getTaskManagerLocation()); assertEquals(s2.getTaskManagerLocation(), s6.getTaskManagerLocation()); assertEquals(0, testingSlotProvider.getNumberOfAvailableSlots()); assertEquals(5, testingSlotProvider.getNumberOfLocalizedAssignments()); assertTrue(1 == testingSlotProvider.getNumberOfNonLocalizedAssignments() || 1 == testingSlotProvider.getNumberOfHostLocalizedAssignments()); assertEquals(0, testingSlotProvider.getNumberOfUnconstrainedAssignments()); s1.releaseSlot(); s2.releaseSlot(); s3.releaseSlot(); s4.releaseSlot(); s5.releaseSlot(); s6.releaseSlot(); assertEquals(2, testingSlotProvider.getNumberOfAvailableSlots()); }
class ScheduleWithCoLocationHintTest extends SchedulerTestBase { @Override protected ComponentMainThreadExecutor getComponentMainThreadExecutor() { return ComponentMainThreadExecutorServiceAdapter.forMainThread(); } @Test public void scheduleAllSharedAndCoLocated() throws Exception { JobVertexID jid1 = new JobVertexID(); JobVertexID jid2 = new JobVertexID(); testingSlotProvider.addTaskManager(2); testingSlotProvider.addTaskManager(2); testingSlotProvider.addTaskManager(2); assertEquals(6, testingSlotProvider.getNumberOfAvailableSlots()); SlotSharingGroup sharingGroup = new SlotSharingGroup(); CoLocationGroup ccg = new CoLocationGroup(); CoLocationConstraint c1 = new CoLocationConstraint(ccg); CoLocationConstraint c2 = new CoLocationConstraint(ccg); CoLocationConstraint c3 = new CoLocationConstraint(ccg); CoLocationConstraint c4 = new CoLocationConstraint(ccg); CoLocationConstraint c5 = new CoLocationConstraint(ccg); CoLocationConstraint c6 = new CoLocationConstraint(ccg); LogicalSlot s1 = testingSlotProvider.allocateSlot(new ScheduledUnit(getExecution(jid1, 0, 6, sharingGroup), sharingGroup.getSlotSharingGroupId(), c1), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); LogicalSlot s2 = testingSlotProvider.allocateSlot(new ScheduledUnit(getExecution(jid1, 1, 6, sharingGroup), sharingGroup.getSlotSharingGroupId(), c2), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); LogicalSlot s3 = testingSlotProvider.allocateSlot(new ScheduledUnit(getExecution(jid1, 2, 6, sharingGroup), sharingGroup.getSlotSharingGroupId(), c3), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); LogicalSlot s4 = testingSlotProvider.allocateSlot(new ScheduledUnit(getExecution(jid1, 3, 6, sharingGroup), sharingGroup.getSlotSharingGroupId(), c4), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); LogicalSlot s5 = testingSlotProvider.allocateSlot(new ScheduledUnit(getExecution(jid2, 0, 6, sharingGroup), sharingGroup.getSlotSharingGroupId(), c1), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); LogicalSlot s6 = testingSlotProvider.allocateSlot(new ScheduledUnit(getExecution(jid2, 1, 6, sharingGroup), sharingGroup.getSlotSharingGroupId(), c2), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); LogicalSlot s7 = testingSlotProvider.allocateSlot(new ScheduledUnit(getExecution(jid2, 2, 6, sharingGroup), sharingGroup.getSlotSharingGroupId(), c3), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); LogicalSlot s8 = testingSlotProvider.allocateSlot(new ScheduledUnit(getExecution(jid1, 4, 6, sharingGroup), sharingGroup.getSlotSharingGroupId(), c5), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); LogicalSlot s9 = testingSlotProvider.allocateSlot(new ScheduledUnit(getExecution(jid1, 5, 6, sharingGroup), sharingGroup.getSlotSharingGroupId(), c6), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); LogicalSlot s10 = testingSlotProvider.allocateSlot(new ScheduledUnit(getExecution(jid2, 3, 6, sharingGroup), sharingGroup.getSlotSharingGroupId(), c4), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); LogicalSlot s11 = testingSlotProvider.allocateSlot(new ScheduledUnit(getExecution(jid2, 4, 6, sharingGroup), sharingGroup.getSlotSharingGroupId(), c5), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); LogicalSlot s12 = testingSlotProvider.allocateSlot(new ScheduledUnit(getExecution(jid2, 5, 6, sharingGroup), sharingGroup.getSlotSharingGroupId(), c6), 
SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); assertNotNull(s1); assertNotNull(s2); assertNotNull(s3); assertNotNull(s4); assertNotNull(s5); assertNotNull(s6); assertNotNull(s7); assertNotNull(s8); assertNotNull(s9); assertNotNull(s10); assertNotNull(s11); assertNotNull(s12); assertEquals(s1.getTaskManagerLocation(), s5.getTaskManagerLocation()); assertEquals(s2.getTaskManagerLocation(), s6.getTaskManagerLocation()); assertEquals(s3.getTaskManagerLocation(), s7.getTaskManagerLocation()); assertEquals(s4.getTaskManagerLocation(), s10.getTaskManagerLocation()); assertEquals(s8.getTaskManagerLocation(), s11.getTaskManagerLocation()); assertEquals(s9.getTaskManagerLocation(), s12.getTaskManagerLocation()); assertEquals(c1.getLocation(), s1.getTaskManagerLocation()); assertEquals(c2.getLocation(), s2.getTaskManagerLocation()); assertEquals(c3.getLocation(), s3.getTaskManagerLocation()); assertEquals(c4.getLocation(), s4.getTaskManagerLocation()); assertEquals(c5.getLocation(), s8.getTaskManagerLocation()); assertEquals(c6.getLocation(), s9.getTaskManagerLocation()); assertEquals(0, testingSlotProvider.getNumberOfAvailableSlots()); assertEquals(6, testingSlotProvider.getNumberOfLocalizedAssignments()); assertEquals(0, testingSlotProvider.getNumberOfNonLocalizedAssignments()); assertEquals(6, testingSlotProvider.getNumberOfUnconstrainedAssignments()); s1.releaseSlot(); s2.releaseSlot(); s3.releaseSlot(); s4.releaseSlot(); s7.releaseSlot(); s10.releaseSlot(); s11.releaseSlot(); s12.releaseSlot(); assertTrue(testingSlotProvider.getNumberOfAvailableSlots() >= 1); LogicalSlot single = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(new JobVertexID(), 0, 1, null)), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); assertNotNull(single); s1.releaseSlot(); s2.releaseSlot(); s3.releaseSlot(); s5.releaseSlot(); s6.releaseSlot(); s7.releaseSlot(); s8.releaseSlot(); s9.releaseSlot(); s11.releaseSlot(); s12.releaseSlot(); assertEquals(5, testingSlotProvider.getNumberOfAvailableSlots()); assertEquals(6, testingSlotProvider.getNumberOfLocalizedAssignments()); assertEquals(0, testingSlotProvider.getNumberOfNonLocalizedAssignments()); assertEquals(7, testingSlotProvider.getNumberOfUnconstrainedAssignments()); } @Test public void scheduleWithIntermediateRelease() throws Exception { JobVertexID jid1 = new JobVertexID(); JobVertexID jid2 = new JobVertexID(); JobVertexID jid3 = new JobVertexID(); JobVertexID jid4 = new JobVertexID(); testingSlotProvider.addTaskManager(1); testingSlotProvider.addTaskManager(1); assertEquals(2, testingSlotProvider.getNumberOfAvailableSlots()); SlotSharingGroup sharingGroup = new SlotSharingGroup(); CoLocationConstraint c1 = new CoLocationConstraint(new CoLocationGroup()); LogicalSlot s1 = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid1, 0, 1, sharingGroup), sharingGroup.getSlotSharingGroupId(), c1), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); LogicalSlot s2 = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid2, 0, 1, sharingGroup), sharingGroup.getSlotSharingGroupId(), c1), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); LogicalSlot sSolo = testingSlotProvider.allocateSlot(new ScheduledUnit(getExecution(jid4, 0, 1, null)), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); ResourceID taskManager = s1.getTaskManagerLocation().getResourceID(); s1.releaseSlot(); s2.releaseSlot(); sSolo.releaseSlot(); LogicalSlot sNew = 
testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid3, 0, 1, sharingGroup), sharingGroup.getSlotSharingGroupId(), c1), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); assertEquals(taskManager, sNew.getTaskManagerLocation().getResourceID()); assertEquals(2, testingSlotProvider.getNumberOfLocalizedAssignments()); assertEquals(0, testingSlotProvider.getNumberOfNonLocalizedAssignments()); assertEquals(2, testingSlotProvider.getNumberOfUnconstrainedAssignments()); } @Test public void scheduleWithReleaseNoResource() throws Exception { JobVertexID jid1 = new JobVertexID(); JobVertexID jid2 = new JobVertexID(); JobVertexID jid3 = new JobVertexID(); testingSlotProvider.addTaskManager(1); testingSlotProvider.addTaskManager(1); assertEquals(2, testingSlotProvider.getNumberOfAvailableSlots()); SlotSharingGroup sharingGroup = new SlotSharingGroup(); CoLocationConstraint c1 = new CoLocationConstraint(new CoLocationGroup()); LogicalSlot s1 = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid1, 0, 1, sharingGroup), sharingGroup.getSlotSharingGroupId(), c1), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); s1.releaseSlot(); testingSlotProvider.allocateSlot(new ScheduledUnit(getExecution(jid2, 0, 1, null)), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); testingSlotProvider.allocateSlot(new ScheduledUnit(getExecution(jid2, 1, 2, null)), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); try { testingSlotProvider.allocateSlot(new ScheduledUnit(getExecution(jid3, 0, 1, sharingGroup), sharingGroup.getSlotSharingGroupId(), c1), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); fail("Scheduled even though no resource was available."); } catch (ExecutionException e) { assertTrue(e.getCause() instanceof NoResourceAvailableException); } assertEquals(0, testingSlotProvider.getNumberOfLocalizedAssignments()); assertEquals(0, testingSlotProvider.getNumberOfNonLocalizedAssignments()); assertEquals(3, testingSlotProvider.getNumberOfUnconstrainedAssignments()); } @Test public void scheduleMixedCoLocationSlotSharing() throws Exception { JobVertexID jid1 = new JobVertexID(); JobVertexID jid2 = new JobVertexID(); JobVertexID jid3 = new JobVertexID(); JobVertexID jid4 = new JobVertexID(); testingSlotProvider.addTaskManager(1); testingSlotProvider.addTaskManager(1); testingSlotProvider.addTaskManager(1); testingSlotProvider.addTaskManager(1); assertEquals(4, testingSlotProvider.getNumberOfAvailableSlots()); CoLocationGroup grp = new CoLocationGroup(); CoLocationConstraint clc1 = new CoLocationConstraint(grp); CoLocationConstraint clc2 = new CoLocationConstraint(grp); CoLocationConstraint clc3 = new CoLocationConstraint(grp); CoLocationConstraint clc4 = new CoLocationConstraint(grp); SlotSharingGroup shareGroup = new SlotSharingGroup(); testingSlotProvider.allocateSlot(new ScheduledUnit(getExecution(jid1, 0, 4, shareGroup), shareGroup.getSlotSharingGroupId()), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); testingSlotProvider.allocateSlot(new ScheduledUnit(getExecution(jid1, 2, 4, shareGroup), shareGroup.getSlotSharingGroupId()), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); testingSlotProvider.allocateSlot(new ScheduledUnit(getExecution(jid1, 1, 4, shareGroup), shareGroup.getSlotSharingGroupId()), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); testingSlotProvider.allocateSlot(new ScheduledUnit(getExecution(jid1, 3, 4, shareGroup), 
shareGroup.getSlotSharingGroupId()), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); LogicalSlot s21 = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid2, 0, 4, shareGroup), shareGroup.getSlotSharingGroupId(), clc1), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); LogicalSlot s22 = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid2, 2, 4, shareGroup), shareGroup.getSlotSharingGroupId(), clc2), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); LogicalSlot s23 = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid2, 1, 4, shareGroup), shareGroup.getSlotSharingGroupId(), clc3), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); LogicalSlot s24 = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid2, 3, 4, shareGroup), shareGroup.getSlotSharingGroupId(), clc4), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); LogicalSlot s31 = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid3, 1, 4, shareGroup), shareGroup.getSlotSharingGroupId(), clc2), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); LogicalSlot s32 = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid3, 2, 4, shareGroup), shareGroup.getSlotSharingGroupId(), clc3), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); LogicalSlot s33 = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid3, 3, 4, shareGroup), shareGroup.getSlotSharingGroupId(), clc4), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); LogicalSlot s34 = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid3, 0, 4, shareGroup), shareGroup.getSlotSharingGroupId(), clc1), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); testingSlotProvider.allocateSlot(new ScheduledUnit(getExecution(jid4, 0, 4, shareGroup), shareGroup.getSlotSharingGroupId()), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); testingSlotProvider.allocateSlot(new ScheduledUnit(getExecution(jid4, 1, 4, shareGroup), shareGroup.getSlotSharingGroupId()), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); testingSlotProvider.allocateSlot(new ScheduledUnit(getExecution(jid4, 2, 4, shareGroup), shareGroup.getSlotSharingGroupId()), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); testingSlotProvider.allocateSlot(new ScheduledUnit(getExecution(jid4, 3, 4, shareGroup), shareGroup.getSlotSharingGroupId()), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); assertEquals(s21.getTaskManagerLocation(), s34.getTaskManagerLocation()); assertEquals(s22.getTaskManagerLocation(), s31.getTaskManagerLocation()); assertEquals(s23.getTaskManagerLocation(), s32.getTaskManagerLocation()); assertEquals(s24.getTaskManagerLocation(), s33.getTaskManagerLocation()); assertEquals(4, testingSlotProvider.getNumberOfLocalizedAssignments()); assertEquals(0, testingSlotProvider.getNumberOfNonLocalizedAssignments()); assertEquals(12, testingSlotProvider.getNumberOfUnconstrainedAssignments()); } @Test public void testSlotReleasedInBetween() throws Exception { JobVertexID jid1 = new JobVertexID(); JobVertexID jid2 = new JobVertexID(); TaskManagerLocation loc1 = testingSlotProvider.addTaskManager(1); TaskManagerLocation loc2 = testingSlotProvider.addTaskManager(1); assertEquals(2, testingSlotProvider.getNumberOfAvailableSlots()); SlotSharingGroup sharingGroup = new SlotSharingGroup(); CoLocationGroup ccg = 
new CoLocationGroup(); CoLocationConstraint cc1 = new CoLocationConstraint(ccg); CoLocationConstraint cc2 = new CoLocationConstraint(ccg); LogicalSlot s1 = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid1, 0, 2, sharingGroup), sharingGroup.getSlotSharingGroupId(), cc1), slotProfileForLocation(loc1), TestingUtils.infiniteTime()).get(); LogicalSlot s2 = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid1, 1, 2, sharingGroup), sharingGroup.getSlotSharingGroupId(), cc2), slotProfileForLocation(loc2), TestingUtils.infiniteTime()).get(); s1.releaseSlot(); s2.releaseSlot(); assertEquals(2, testingSlotProvider.getNumberOfAvailableSlots()); LogicalSlot s3 = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid2, 0, 2, sharingGroup), sharingGroup.getSlotSharingGroupId(), cc1), slotProfileForLocation(loc2), TestingUtils.infiniteTime()).get(); LogicalSlot s4 = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid2, 1, 2, sharingGroup), sharingGroup.getSlotSharingGroupId(), cc2), slotProfileForLocation(loc1), TestingUtils.infiniteTime()).get(); assertEquals(loc1, s3.getTaskManagerLocation()); assertEquals(loc2, s4.getTaskManagerLocation()); s3.releaseSlot(); s4.releaseSlot(); assertEquals(2, testingSlotProvider.getNumberOfAvailableSlots()); assertEquals(4, testingSlotProvider.getNumberOfLocalizedAssignments()); assertEquals(0, testingSlotProvider.getNumberOfNonLocalizedAssignments()); assertEquals(0, testingSlotProvider.getNumberOfUnconstrainedAssignments()); } @Test public void testSlotReleasedInBetweenAndNoNewLocal() throws Exception { JobVertexID jid1 = new JobVertexID(); JobVertexID jid2 = new JobVertexID(); JobVertexID jidx = new JobVertexID(); TaskManagerLocation loc1 = testingSlotProvider.addTaskManager(1); TaskManagerLocation loc2 = testingSlotProvider.addTaskManager(1); assertEquals(2, testingSlotProvider.getNumberOfAvailableSlots()); SlotSharingGroup sharingGroup = new SlotSharingGroup(); CoLocationGroup ccg = new CoLocationGroup(); CoLocationConstraint cc1 = new CoLocationConstraint(ccg); CoLocationConstraint cc2 = new CoLocationConstraint(ccg); LogicalSlot s1 = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid1, 0, 2, sharingGroup), sharingGroup.getSlotSharingGroupId(), cc1), slotProfileForLocation(loc1), TestingUtils.infiniteTime()).get(); LogicalSlot s2 = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid1, 1, 2, sharingGroup), sharingGroup.getSlotSharingGroupId(), cc2), slotProfileForLocation(loc2), TestingUtils.infiniteTime()).get(); s1.releaseSlot(); s2.releaseSlot(); assertEquals(2, testingSlotProvider.getNumberOfAvailableSlots()); LogicalSlot sa = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jidx, 0, 2, null)), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); LogicalSlot sb = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jidx, 1, 2, null)), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); try { testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid2, 0, 2, sharingGroup), sharingGroup.getSlotSharingGroupId(), cc1), slotProfileForLocation(loc2), TestingUtils.infiniteTime()).get(); fail("should not be able to find a resource"); } catch (ExecutionException e) { assertTrue(e.getCause() instanceof NoResourceAvailableException); } catch (Exception e) { fail("wrong exception"); } sa.releaseSlot(); sb.releaseSlot(); assertEquals(2, testingSlotProvider.getNumberOfAvailableSlots()); assertEquals(2, 
testingSlotProvider.getNumberOfLocalizedAssignments()); assertEquals(0, testingSlotProvider.getNumberOfNonLocalizedAssignments()); assertEquals(2, testingSlotProvider.getNumberOfUnconstrainedAssignments()); } @Test public void testScheduleOutOfOrder() throws Exception { JobVertexID jid1 = new JobVertexID(); JobVertexID jid2 = new JobVertexID(); TaskManagerLocation loc1 = testingSlotProvider.addTaskManager(1); testingSlotProvider.addTaskManager(1); assertEquals(2, testingSlotProvider.getNumberOfAvailableSlots()); SlotSharingGroup sharingGroup = new SlotSharingGroup(); CoLocationGroup ccg = new CoLocationGroup(); CoLocationConstraint cc1 = new CoLocationConstraint(ccg); CoLocationConstraint cc2 = new CoLocationConstraint(ccg); LogicalSlot s1 = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid1, 0, 2, sharingGroup), sharingGroup.getSlotSharingGroupId(), cc1), slotProfileForLocation(loc1), TestingUtils.infiniteTime()).get(); LogicalSlot s2 = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid2, 0, 2, sharingGroup), sharingGroup.getSlotSharingGroupId(), cc2), slotProfileForLocation(loc1), TestingUtils.infiniteTime()).get(); LogicalSlot s3 = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid2, 1, 2, sharingGroup), sharingGroup.getSlotSharingGroupId(), cc1), slotProfileForLocation(loc1), TestingUtils.infiniteTime()).get(); LogicalSlot s4 = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid1, 1, 2, sharingGroup), sharingGroup.getSlotSharingGroupId(), cc2), slotProfileForLocation(loc1), TestingUtils.infiniteTime()).get(); assertEquals(s1.getTaskManagerLocation(), s3.getTaskManagerLocation()); assertEquals(s2.getTaskManagerLocation(), s4.getTaskManagerLocation()); assertEquals(0, testingSlotProvider.getNumberOfAvailableSlots()); assertEquals(3, testingSlotProvider.getNumberOfLocalizedAssignments()); assertTrue(1 == testingSlotProvider.getNumberOfNonLocalizedAssignments() || 1 == testingSlotProvider.getNumberOfHostLocalizedAssignments()); assertEquals(0, testingSlotProvider.getNumberOfUnconstrainedAssignments()); s1.releaseSlot(); s2.releaseSlot(); s3.releaseSlot(); s4.releaseSlot(); assertEquals(2, testingSlotProvider.getNumberOfAvailableSlots()); } @Test public void nonColocationFollowsCoLocation() throws Exception { JobVertexID jid1 = new JobVertexID(); JobVertexID jid2 = new JobVertexID(); TaskManagerLocation loc1 = testingSlotProvider.addTaskManager(1); TaskManagerLocation loc2 = testingSlotProvider.addTaskManager(1); assertEquals(2, testingSlotProvider.getNumberOfAvailableSlots()); SlotSharingGroup sharingGroup = new SlotSharingGroup(); CoLocationGroup ccg = new CoLocationGroup(); CoLocationConstraint cc1 = new CoLocationConstraint(ccg); CoLocationConstraint cc2 = new CoLocationConstraint(ccg); LogicalSlot s1 = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid1, 0, 2, sharingGroup), sharingGroup.getSlotSharingGroupId(), cc1), slotProfileForLocation(loc1), TestingUtils.infiniteTime()).get(); LogicalSlot s2 = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid1, 1, 2, sharingGroup), sharingGroup.getSlotSharingGroupId(), cc2), slotProfileForLocation(loc2), TestingUtils.infiniteTime()).get(); LogicalSlot s3 = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid2, 0, 2, sharingGroup), sharingGroup.getSlotSharingGroupId()), slotProfileForLocation(loc1), TestingUtils.infiniteTime()).get(); LogicalSlot s4 = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid2, 1, 
2, sharingGroup), sharingGroup.getSlotSharingGroupId()), slotProfileForLocation(loc1), TestingUtils.infiniteTime()).get(); assertEquals(s1.getTaskManagerLocation(), s3.getTaskManagerLocation()); assertEquals(s2.getTaskManagerLocation(), s4.getTaskManagerLocation()); s1.releaseSlot(); s2.releaseSlot(); s3.releaseSlot(); s4.releaseSlot(); assertEquals(2, testingSlotProvider.getNumberOfAvailableSlots()); } private static SlotProfile slotProfileForLocation(TaskManagerLocation location) { return SlotProfile.preferredLocality(ResourceProfile.UNKNOWN, Collections.singletonList(location)); } }
class ScheduleWithCoLocationHintTest extends SchedulerTestBase { @Override protected ComponentMainThreadExecutor getComponentMainThreadExecutor() { return ComponentMainThreadExecutorServiceAdapter.forMainThread(); } @Test public void scheduleAllSharedAndCoLocated() throws Exception { JobVertexID jid1 = new JobVertexID(); JobVertexID jid2 = new JobVertexID(); testingSlotProvider.addTaskManager(2); testingSlotProvider.addTaskManager(2); testingSlotProvider.addTaskManager(2); assertEquals(6, testingSlotProvider.getNumberOfAvailableSlots()); SlotSharingGroup sharingGroup = new SlotSharingGroup(); CoLocationGroup ccg = new CoLocationGroup(); CoLocationConstraint c1 = new CoLocationConstraint(ccg); CoLocationConstraint c2 = new CoLocationConstraint(ccg); CoLocationConstraint c3 = new CoLocationConstraint(ccg); CoLocationConstraint c4 = new CoLocationConstraint(ccg); CoLocationConstraint c5 = new CoLocationConstraint(ccg); CoLocationConstraint c6 = new CoLocationConstraint(ccg); LogicalSlot s1 = testingSlotProvider.allocateSlot(new ScheduledUnit(getExecution(jid1, 0, 6, sharingGroup), sharingGroup.getSlotSharingGroupId(), c1), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); LogicalSlot s2 = testingSlotProvider.allocateSlot(new ScheduledUnit(getExecution(jid1, 1, 6, sharingGroup), sharingGroup.getSlotSharingGroupId(), c2), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); LogicalSlot s3 = testingSlotProvider.allocateSlot(new ScheduledUnit(getExecution(jid1, 2, 6, sharingGroup), sharingGroup.getSlotSharingGroupId(), c3), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); LogicalSlot s4 = testingSlotProvider.allocateSlot(new ScheduledUnit(getExecution(jid1, 3, 6, sharingGroup), sharingGroup.getSlotSharingGroupId(), c4), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); LogicalSlot s5 = testingSlotProvider.allocateSlot(new ScheduledUnit(getExecution(jid2, 0, 6, sharingGroup), sharingGroup.getSlotSharingGroupId(), c1), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); LogicalSlot s6 = testingSlotProvider.allocateSlot(new ScheduledUnit(getExecution(jid2, 1, 6, sharingGroup), sharingGroup.getSlotSharingGroupId(), c2), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); LogicalSlot s7 = testingSlotProvider.allocateSlot(new ScheduledUnit(getExecution(jid2, 2, 6, sharingGroup), sharingGroup.getSlotSharingGroupId(), c3), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); LogicalSlot s8 = testingSlotProvider.allocateSlot(new ScheduledUnit(getExecution(jid1, 4, 6, sharingGroup), sharingGroup.getSlotSharingGroupId(), c5), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); LogicalSlot s9 = testingSlotProvider.allocateSlot(new ScheduledUnit(getExecution(jid1, 5, 6, sharingGroup), sharingGroup.getSlotSharingGroupId(), c6), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); LogicalSlot s10 = testingSlotProvider.allocateSlot(new ScheduledUnit(getExecution(jid2, 3, 6, sharingGroup), sharingGroup.getSlotSharingGroupId(), c4), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); LogicalSlot s11 = testingSlotProvider.allocateSlot(new ScheduledUnit(getExecution(jid2, 4, 6, sharingGroup), sharingGroup.getSlotSharingGroupId(), c5), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); LogicalSlot s12 = testingSlotProvider.allocateSlot(new ScheduledUnit(getExecution(jid2, 5, 6, sharingGroup), sharingGroup.getSlotSharingGroupId(), c6), 
SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); assertNotNull(s1); assertNotNull(s2); assertNotNull(s3); assertNotNull(s4); assertNotNull(s5); assertNotNull(s6); assertNotNull(s7); assertNotNull(s8); assertNotNull(s9); assertNotNull(s10); assertNotNull(s11); assertNotNull(s12); assertEquals(s1.getTaskManagerLocation(), s5.getTaskManagerLocation()); assertEquals(s2.getTaskManagerLocation(), s6.getTaskManagerLocation()); assertEquals(s3.getTaskManagerLocation(), s7.getTaskManagerLocation()); assertEquals(s4.getTaskManagerLocation(), s10.getTaskManagerLocation()); assertEquals(s8.getTaskManagerLocation(), s11.getTaskManagerLocation()); assertEquals(s9.getTaskManagerLocation(), s12.getTaskManagerLocation()); assertEquals(c1.getLocation(), s1.getTaskManagerLocation()); assertEquals(c2.getLocation(), s2.getTaskManagerLocation()); assertEquals(c3.getLocation(), s3.getTaskManagerLocation()); assertEquals(c4.getLocation(), s4.getTaskManagerLocation()); assertEquals(c5.getLocation(), s8.getTaskManagerLocation()); assertEquals(c6.getLocation(), s9.getTaskManagerLocation()); assertEquals(0, testingSlotProvider.getNumberOfAvailableSlots()); assertEquals(6, testingSlotProvider.getNumberOfLocalizedAssignments()); assertEquals(0, testingSlotProvider.getNumberOfNonLocalizedAssignments()); assertEquals(6, testingSlotProvider.getNumberOfUnconstrainedAssignments()); s1.releaseSlot(); s2.releaseSlot(); s3.releaseSlot(); s4.releaseSlot(); s7.releaseSlot(); s10.releaseSlot(); s11.releaseSlot(); s12.releaseSlot(); assertTrue(testingSlotProvider.getNumberOfAvailableSlots() >= 1); LogicalSlot single = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(new JobVertexID(), 0, 1, null)), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); assertNotNull(single); s1.releaseSlot(); s2.releaseSlot(); s3.releaseSlot(); s5.releaseSlot(); s6.releaseSlot(); s7.releaseSlot(); s8.releaseSlot(); s9.releaseSlot(); s11.releaseSlot(); s12.releaseSlot(); assertEquals(5, testingSlotProvider.getNumberOfAvailableSlots()); assertEquals(6, testingSlotProvider.getNumberOfLocalizedAssignments()); assertEquals(0, testingSlotProvider.getNumberOfNonLocalizedAssignments()); assertEquals(7, testingSlotProvider.getNumberOfUnconstrainedAssignments()); } @Test public void scheduleWithIntermediateRelease() throws Exception { JobVertexID jid1 = new JobVertexID(); JobVertexID jid2 = new JobVertexID(); JobVertexID jid3 = new JobVertexID(); JobVertexID jid4 = new JobVertexID(); testingSlotProvider.addTaskManager(1); testingSlotProvider.addTaskManager(1); assertEquals(2, testingSlotProvider.getNumberOfAvailableSlots()); SlotSharingGroup sharingGroup = new SlotSharingGroup(); CoLocationConstraint c1 = new CoLocationConstraint(new CoLocationGroup()); LogicalSlot s1 = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid1, 0, 1, sharingGroup), sharingGroup.getSlotSharingGroupId(), c1), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); LogicalSlot s2 = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid2, 0, 1, sharingGroup), sharingGroup.getSlotSharingGroupId(), c1), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); LogicalSlot sSolo = testingSlotProvider.allocateSlot(new ScheduledUnit(getExecution(jid4, 0, 1, null)), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); ResourceID taskManager = s1.getTaskManagerLocation().getResourceID(); s1.releaseSlot(); s2.releaseSlot(); sSolo.releaseSlot(); LogicalSlot sNew = 
testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid3, 0, 1, sharingGroup), sharingGroup.getSlotSharingGroupId(), c1), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); assertEquals(taskManager, sNew.getTaskManagerLocation().getResourceID()); assertEquals(2, testingSlotProvider.getNumberOfLocalizedAssignments()); assertEquals(0, testingSlotProvider.getNumberOfNonLocalizedAssignments()); assertEquals(2, testingSlotProvider.getNumberOfUnconstrainedAssignments()); } @Test public void scheduleWithReleaseNoResource() throws Exception { JobVertexID jid1 = new JobVertexID(); JobVertexID jid2 = new JobVertexID(); JobVertexID jid3 = new JobVertexID(); testingSlotProvider.addTaskManager(1); testingSlotProvider.addTaskManager(1); assertEquals(2, testingSlotProvider.getNumberOfAvailableSlots()); SlotSharingGroup sharingGroup = new SlotSharingGroup(); CoLocationConstraint c1 = new CoLocationConstraint(new CoLocationGroup()); LogicalSlot s1 = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid1, 0, 1, sharingGroup), sharingGroup.getSlotSharingGroupId(), c1), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); s1.releaseSlot(); testingSlotProvider.allocateSlot(new ScheduledUnit(getExecution(jid2, 0, 1, null)), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); testingSlotProvider.allocateSlot(new ScheduledUnit(getExecution(jid2, 1, 2, null)), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); try { testingSlotProvider.allocateSlot(new ScheduledUnit(getExecution(jid3, 0, 1, sharingGroup), sharingGroup.getSlotSharingGroupId(), c1), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); fail("Scheduled even though no resource was available."); } catch (ExecutionException e) { assertTrue(e.getCause() instanceof NoResourceAvailableException); } assertEquals(0, testingSlotProvider.getNumberOfLocalizedAssignments()); assertEquals(0, testingSlotProvider.getNumberOfNonLocalizedAssignments()); assertEquals(3, testingSlotProvider.getNumberOfUnconstrainedAssignments()); } @Test public void scheduleMixedCoLocationSlotSharing() throws Exception { JobVertexID jid1 = new JobVertexID(); JobVertexID jid2 = new JobVertexID(); JobVertexID jid3 = new JobVertexID(); JobVertexID jid4 = new JobVertexID(); testingSlotProvider.addTaskManager(1); testingSlotProvider.addTaskManager(1); testingSlotProvider.addTaskManager(1); testingSlotProvider.addTaskManager(1); assertEquals(4, testingSlotProvider.getNumberOfAvailableSlots()); CoLocationGroup grp = new CoLocationGroup(); CoLocationConstraint clc1 = new CoLocationConstraint(grp); CoLocationConstraint clc2 = new CoLocationConstraint(grp); CoLocationConstraint clc3 = new CoLocationConstraint(grp); CoLocationConstraint clc4 = new CoLocationConstraint(grp); SlotSharingGroup shareGroup = new SlotSharingGroup(); testingSlotProvider.allocateSlot(new ScheduledUnit(getExecution(jid1, 0, 4, shareGroup), shareGroup.getSlotSharingGroupId()), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); testingSlotProvider.allocateSlot(new ScheduledUnit(getExecution(jid1, 2, 4, shareGroup), shareGroup.getSlotSharingGroupId()), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); testingSlotProvider.allocateSlot(new ScheduledUnit(getExecution(jid1, 1, 4, shareGroup), shareGroup.getSlotSharingGroupId()), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); testingSlotProvider.allocateSlot(new ScheduledUnit(getExecution(jid1, 3, 4, shareGroup), 
shareGroup.getSlotSharingGroupId()), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); LogicalSlot s21 = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid2, 0, 4, shareGroup), shareGroup.getSlotSharingGroupId(), clc1), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); LogicalSlot s22 = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid2, 2, 4, shareGroup), shareGroup.getSlotSharingGroupId(), clc2), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); LogicalSlot s23 = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid2, 1, 4, shareGroup), shareGroup.getSlotSharingGroupId(), clc3), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); LogicalSlot s24 = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid2, 3, 4, shareGroup), shareGroup.getSlotSharingGroupId(), clc4), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); LogicalSlot s31 = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid3, 1, 4, shareGroup), shareGroup.getSlotSharingGroupId(), clc2), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); LogicalSlot s32 = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid3, 2, 4, shareGroup), shareGroup.getSlotSharingGroupId(), clc3), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); LogicalSlot s33 = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid3, 3, 4, shareGroup), shareGroup.getSlotSharingGroupId(), clc4), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); LogicalSlot s34 = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid3, 0, 4, shareGroup), shareGroup.getSlotSharingGroupId(), clc1), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); testingSlotProvider.allocateSlot(new ScheduledUnit(getExecution(jid4, 0, 4, shareGroup), shareGroup.getSlotSharingGroupId()), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); testingSlotProvider.allocateSlot(new ScheduledUnit(getExecution(jid4, 1, 4, shareGroup), shareGroup.getSlotSharingGroupId()), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); testingSlotProvider.allocateSlot(new ScheduledUnit(getExecution(jid4, 2, 4, shareGroup), shareGroup.getSlotSharingGroupId()), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); testingSlotProvider.allocateSlot(new ScheduledUnit(getExecution(jid4, 3, 4, shareGroup), shareGroup.getSlotSharingGroupId()), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); assertEquals(s21.getTaskManagerLocation(), s34.getTaskManagerLocation()); assertEquals(s22.getTaskManagerLocation(), s31.getTaskManagerLocation()); assertEquals(s23.getTaskManagerLocation(), s32.getTaskManagerLocation()); assertEquals(s24.getTaskManagerLocation(), s33.getTaskManagerLocation()); assertEquals(4, testingSlotProvider.getNumberOfLocalizedAssignments()); assertEquals(0, testingSlotProvider.getNumberOfNonLocalizedAssignments()); assertEquals(12, testingSlotProvider.getNumberOfUnconstrainedAssignments()); } @Test public void testSlotReleasedInBetween() throws Exception { JobVertexID jid1 = new JobVertexID(); JobVertexID jid2 = new JobVertexID(); TaskManagerLocation loc1 = testingSlotProvider.addTaskManager(1); TaskManagerLocation loc2 = testingSlotProvider.addTaskManager(1); assertEquals(2, testingSlotProvider.getNumberOfAvailableSlots()); SlotSharingGroup sharingGroup = new SlotSharingGroup(); CoLocationGroup ccg =
new CoLocationGroup(); CoLocationConstraint cc1 = new CoLocationConstraint(ccg); CoLocationConstraint cc2 = new CoLocationConstraint(ccg); LogicalSlot s1 = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid1, 0, 2, sharingGroup), sharingGroup.getSlotSharingGroupId(), cc1), slotProfileForLocation(loc1), TestingUtils.infiniteTime()).get(); LogicalSlot s2 = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid1, 1, 2, sharingGroup), sharingGroup.getSlotSharingGroupId(), cc2), slotProfileForLocation(loc2), TestingUtils.infiniteTime()).get(); s1.releaseSlot(); s2.releaseSlot(); assertEquals(2, testingSlotProvider.getNumberOfAvailableSlots()); LogicalSlot s3 = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid2, 0, 2, sharingGroup), sharingGroup.getSlotSharingGroupId(), cc1), slotProfileForLocation(loc2), TestingUtils.infiniteTime()).get(); LogicalSlot s4 = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid2, 1, 2, sharingGroup), sharingGroup.getSlotSharingGroupId(), cc2), slotProfileForLocation(loc1), TestingUtils.infiniteTime()).get(); assertEquals(loc1, s3.getTaskManagerLocation()); assertEquals(loc2, s4.getTaskManagerLocation()); s3.releaseSlot(); s4.releaseSlot(); assertEquals(2, testingSlotProvider.getNumberOfAvailableSlots()); assertEquals(4, testingSlotProvider.getNumberOfLocalizedAssignments()); assertEquals(0, testingSlotProvider.getNumberOfNonLocalizedAssignments()); assertEquals(0, testingSlotProvider.getNumberOfUnconstrainedAssignments()); } @Test public void testSlotReleasedInBetweenAndNoNewLocal() throws Exception { JobVertexID jid1 = new JobVertexID(); JobVertexID jid2 = new JobVertexID(); JobVertexID jidx = new JobVertexID(); TaskManagerLocation loc1 = testingSlotProvider.addTaskManager(1); TaskManagerLocation loc2 = testingSlotProvider.addTaskManager(1); assertEquals(2, testingSlotProvider.getNumberOfAvailableSlots()); SlotSharingGroup sharingGroup = new SlotSharingGroup(); CoLocationGroup ccg = new CoLocationGroup(); CoLocationConstraint cc1 = new CoLocationConstraint(ccg); CoLocationConstraint cc2 = new CoLocationConstraint(ccg); LogicalSlot s1 = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid1, 0, 2, sharingGroup), sharingGroup.getSlotSharingGroupId(), cc1), slotProfileForLocation(loc1), TestingUtils.infiniteTime()).get(); LogicalSlot s2 = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid1, 1, 2, sharingGroup), sharingGroup.getSlotSharingGroupId(), cc2), slotProfileForLocation(loc2), TestingUtils.infiniteTime()).get(); s1.releaseSlot(); s2.releaseSlot(); assertEquals(2, testingSlotProvider.getNumberOfAvailableSlots()); LogicalSlot sa = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jidx, 0, 2, null)), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); LogicalSlot sb = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jidx, 1, 2, null)), SlotProfile.noRequirements(), TestingUtils.infiniteTime()).get(); try { testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid2, 0, 2, sharingGroup), sharingGroup.getSlotSharingGroupId(), cc1), slotProfileForLocation(loc2), TestingUtils.infiniteTime()).get(); fail("should not be able to find a resource"); } catch (ExecutionException e) { assertTrue(e.getCause() instanceof NoResourceAvailableException); } catch (Exception e) { fail("wrong exception"); } sa.releaseSlot(); sb.releaseSlot(); assertEquals(2, testingSlotProvider.getNumberOfAvailableSlots()); assertEquals(2, 
testingSlotProvider.getNumberOfLocalizedAssignments()); assertEquals(0, testingSlotProvider.getNumberOfNonLocalizedAssignments()); assertEquals(2, testingSlotProvider.getNumberOfUnconstrainedAssignments()); } @Test public void testScheduleOutOfOrder() throws Exception { JobVertexID jid1 = new JobVertexID(); JobVertexID jid2 = new JobVertexID(); TaskManagerLocation loc1 = testingSlotProvider.addTaskManager(1); testingSlotProvider.addTaskManager(1); assertEquals(2, testingSlotProvider.getNumberOfAvailableSlots()); SlotSharingGroup sharingGroup = new SlotSharingGroup(); CoLocationGroup ccg = new CoLocationGroup(); CoLocationConstraint cc1 = new CoLocationConstraint(ccg); CoLocationConstraint cc2 = new CoLocationConstraint(ccg); LogicalSlot s1 = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid1, 0, 2, sharingGroup), sharingGroup.getSlotSharingGroupId(), cc1), slotProfileForLocation(loc1), TestingUtils.infiniteTime()).get(); LogicalSlot s2 = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid2, 0, 2, sharingGroup), sharingGroup.getSlotSharingGroupId(), cc2), slotProfileForLocation(loc1), TestingUtils.infiniteTime()).get(); LogicalSlot s3 = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid2, 1, 2, sharingGroup), sharingGroup.getSlotSharingGroupId(), cc1), slotProfileForLocation(loc1), TestingUtils.infiniteTime()).get(); LogicalSlot s4 = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid1, 1, 2, sharingGroup), sharingGroup.getSlotSharingGroupId(), cc2), slotProfileForLocation(loc1), TestingUtils.infiniteTime()).get(); assertEquals(s1.getTaskManagerLocation(), s3.getTaskManagerLocation()); assertEquals(s2.getTaskManagerLocation(), s4.getTaskManagerLocation()); assertEquals(0, testingSlotProvider.getNumberOfAvailableSlots()); assertEquals(3, testingSlotProvider.getNumberOfLocalizedAssignments()); assertTrue(1 == testingSlotProvider.getNumberOfNonLocalizedAssignments() || 1 == testingSlotProvider.getNumberOfHostLocalizedAssignments()); assertEquals(0, testingSlotProvider.getNumberOfUnconstrainedAssignments()); s1.releaseSlot(); s2.releaseSlot(); s3.releaseSlot(); s4.releaseSlot(); assertEquals(2, testingSlotProvider.getNumberOfAvailableSlots()); } @Test public void nonColocationFollowsCoLocation() throws Exception { JobVertexID jid1 = new JobVertexID(); JobVertexID jid2 = new JobVertexID(); TaskManagerLocation loc1 = testingSlotProvider.addTaskManager(1); TaskManagerLocation loc2 = testingSlotProvider.addTaskManager(1); assertEquals(2, testingSlotProvider.getNumberOfAvailableSlots()); SlotSharingGroup sharingGroup = new SlotSharingGroup(); CoLocationGroup ccg = new CoLocationGroup(); CoLocationConstraint cc1 = new CoLocationConstraint(ccg); CoLocationConstraint cc2 = new CoLocationConstraint(ccg); LogicalSlot s1 = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid1, 0, 2, sharingGroup), sharingGroup.getSlotSharingGroupId(), cc1), slotProfileForLocation(loc1), TestingUtils.infiniteTime()).get(); LogicalSlot s2 = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid1, 1, 2, sharingGroup), sharingGroup.getSlotSharingGroupId(), cc2), slotProfileForLocation(loc2), TestingUtils.infiniteTime()).get(); LogicalSlot s3 = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid2, 0, 2, sharingGroup), sharingGroup.getSlotSharingGroupId()), slotProfileForLocation(loc1), TestingUtils.infiniteTime()).get(); LogicalSlot s4 = testingSlotProvider.allocateSlot( new ScheduledUnit(getExecution(jid2, 1, 
2, sharingGroup), sharingGroup.getSlotSharingGroupId()), slotProfileForLocation(loc1), TestingUtils.infiniteTime()).get(); assertEquals(s1.getTaskManagerLocation(), s3.getTaskManagerLocation()); assertEquals(s2.getTaskManagerLocation(), s4.getTaskManagerLocation()); s1.releaseSlot(); s2.releaseSlot(); s3.releaseSlot(); s4.releaseSlot(); assertEquals(2, testingSlotProvider.getNumberOfAvailableSlots()); } private static SlotProfile slotProfileForLocation(TaskManagerLocation location) { return SlotProfile.preferredLocality(ResourceProfile.UNKNOWN, Collections.singletonList(location)); } }
Should the user's initial workload group default to `normal`?
public void update(List<Pair<String, String>> properties) throws UserException { long newMaxConn = this.commonProperties.getMaxConn(); long newMaxQueryInstances = this.commonProperties.getMaxQueryInstances(); String sqlBlockRules = this.commonProperties.getSqlBlockRules(); int cpuResourceLimit = this.commonProperties.getCpuResourceLimit(); Set<Tag> resourceTags = this.commonProperties.getResourceTags(); long execMemLimit = this.commonProperties.getExecMemLimit(); int queryTimeout = this.commonProperties.getQueryTimeout(); int insertTimeout = this.commonProperties.getInsertTimeout(); String workloadGroup = this.commonProperties.getWorkloadGroup(); String newDefaultLoadCluster = defaultLoadCluster; Map<String, DppConfig> newDppConfigs = Maps.newHashMap(clusterToDppConfig); for (Pair<String, String> entry : properties) { String key = entry.first; String value = entry.second; String[] keyArr = key.split("\\" + SetUserPropertyVar.DOT_SEPARATOR); if (keyArr[0].equalsIgnoreCase(PROP_MAX_USER_CONNECTIONS)) { if (keyArr.length != 1) { throw new DdlException(PROP_MAX_USER_CONNECTIONS + " format error"); } try { newMaxConn = Long.parseLong(value); } catch (NumberFormatException e) { throw new DdlException(PROP_MAX_USER_CONNECTIONS + " is not number"); } if (newMaxConn <= 0 || newMaxConn > 10000) { throw new DdlException(PROP_MAX_USER_CONNECTIONS + " is not valid, must between 1 and 10000"); } } else if (keyArr[0].equalsIgnoreCase(PROP_LOAD_CLUSTER)) { updateLoadCluster(keyArr, value, newDppConfigs); } else if (keyArr[0].equalsIgnoreCase(PROP_DEFAULT_LOAD_CLUSTER)) { if (keyArr.length != 1) { throw new DdlException(PROP_DEFAULT_LOAD_CLUSTER + " format error"); } if (value != null && !newDppConfigs.containsKey(value)) { throw new DdlException("Load cluster[" + value + "] does not exist"); } newDefaultLoadCluster = value; } else if (keyArr[0].equalsIgnoreCase(PROP_MAX_QUERY_INSTANCES)) { if (keyArr.length != 1) { throw new DdlException(PROP_MAX_QUERY_INSTANCES + " format error"); } try { newMaxQueryInstances = Long.parseLong(value); } catch (NumberFormatException e) { throw new DdlException(PROP_MAX_QUERY_INSTANCES + " is not number"); } } else if (keyArr[0].equalsIgnoreCase(PROP_SQL_BLOCK_RULES)) { if (keyArr.length != 1) { throw new DdlException(PROP_SQL_BLOCK_RULES + " format error"); } for (String ruleName : value.replaceAll(" ", "").split(",")) { if (!ruleName.equals("") && !Env.getCurrentEnv().getSqlBlockRuleMgr().existRule(ruleName)) { throw new DdlException("the sql block rule " + ruleName + " not exist"); } } sqlBlockRules = value; } else if (keyArr[0].equalsIgnoreCase(PROP_CPU_RESOURCE_LIMIT)) { if (keyArr.length != 1) { throw new DdlException(PROP_CPU_RESOURCE_LIMIT + " format error"); } int limit = -1; try { limit = Integer.parseInt(value); } catch (NumberFormatException e) { throw new DdlException(key + " is not number"); } if (limit <= 0 && limit != -1) { throw new DdlException(key + " is not valid. 
Should not larger than 0 or equal to -1"); } cpuResourceLimit = limit; } else if (keyArr[0].equalsIgnoreCase(PROP_RESOURCE_TAGS)) { if (keyArr.length != 2) { throw new DdlException(PROP_RESOURCE_TAGS + " format error"); } if (!keyArr[1].equals(Tag.TYPE_LOCATION)) { throw new DdlException("Only support location tag now"); } if (Strings.isNullOrEmpty(value)) { resourceTags = Sets.newHashSet(); } else { try { resourceTags = parseLocationResoureTags(value); } catch (NumberFormatException e) { throw new DdlException(PROP_RESOURCE_TAGS + " parse failed: " + e.getMessage()); } } } else if (keyArr[0].equalsIgnoreCase(PROP_EXEC_MEM_LIMIT)) { execMemLimit = getLongProperty(key, value, keyArr, PROP_EXEC_MEM_LIMIT); } else if (keyArr[0].equalsIgnoreCase(PROP_USER_QUERY_TIMEOUT)) { if (keyArr.length != 1) { throw new DdlException(PROP_USER_QUERY_TIMEOUT + " format error"); } try { queryTimeout = Integer.parseInt(value); } catch (NumberFormatException e) { throw new DdlException(PROP_USER_QUERY_TIMEOUT + " is not number"); } } else if (keyArr[0].equalsIgnoreCase(PROP_USER_INSERT_TIMEOUT)) { if (keyArr.length != 1) { throw new DdlException(PROP_USER_INSERT_TIMEOUT + " format error"); } try { insertTimeout = Integer.parseInt(value); } catch (NumberFormatException e) { throw new DdlException(PROP_USER_INSERT_TIMEOUT + " is not number"); } } else if (keyArr[0].equalsIgnoreCase(PROP_WORKLOAD_GROUP)) { if (keyArr.length != 1) { throw new DdlException(PROP_WORKLOAD_GROUP + " format error"); } workloadGroup = value; } else { throw new DdlException("Unknown user property(" + key + ")"); } } this.commonProperties.setMaxConn(newMaxConn); this.commonProperties.setMaxQueryInstances(newMaxQueryInstances); this.commonProperties.setSqlBlockRules(sqlBlockRules); this.commonProperties.setCpuResourceLimit(cpuResourceLimit); this.commonProperties.setResourceTags(resourceTags); this.commonProperties.setExecMemLimit(execMemLimit); this.commonProperties.setQueryTimeout(queryTimeout); this.commonProperties.setInsertTimeout(insertTimeout); this.commonProperties.setWorkloadGroup(workloadGroup); if (newDppConfigs.containsKey(newDefaultLoadCluster)) { defaultLoadCluster = newDefaultLoadCluster; } else { defaultLoadCluster = null; } clusterToDppConfig = newDppConfigs; }
String workloadGroup = this.commonProperties.getWorkloadGroup();
public void update(List<Pair<String, String>> properties) throws UserException { long newMaxConn = this.commonProperties.getMaxConn(); long newMaxQueryInstances = this.commonProperties.getMaxQueryInstances(); String sqlBlockRules = this.commonProperties.getSqlBlockRules(); int cpuResourceLimit = this.commonProperties.getCpuResourceLimit(); Set<Tag> resourceTags = this.commonProperties.getResourceTags(); long execMemLimit = this.commonProperties.getExecMemLimit(); int queryTimeout = this.commonProperties.getQueryTimeout(); int insertTimeout = this.commonProperties.getInsertTimeout(); String workloadGroup = this.commonProperties.getWorkloadGroup(); String newDefaultLoadCluster = defaultLoadCluster; Map<String, DppConfig> newDppConfigs = Maps.newHashMap(clusterToDppConfig); for (Pair<String, String> entry : properties) { String key = entry.first; String value = entry.second; String[] keyArr = key.split("\\" + SetUserPropertyVar.DOT_SEPARATOR); if (keyArr[0].equalsIgnoreCase(PROP_MAX_USER_CONNECTIONS)) { if (keyArr.length != 1) { throw new DdlException(PROP_MAX_USER_CONNECTIONS + " format error"); } try { newMaxConn = Long.parseLong(value); } catch (NumberFormatException e) { throw new DdlException(PROP_MAX_USER_CONNECTIONS + " is not number"); } if (newMaxConn <= 0 || newMaxConn > 10000) { throw new DdlException(PROP_MAX_USER_CONNECTIONS + " is not valid, must between 1 and 10000"); } } else if (keyArr[0].equalsIgnoreCase(PROP_LOAD_CLUSTER)) { updateLoadCluster(keyArr, value, newDppConfigs); } else if (keyArr[0].equalsIgnoreCase(PROP_DEFAULT_LOAD_CLUSTER)) { if (keyArr.length != 1) { throw new DdlException(PROP_DEFAULT_LOAD_CLUSTER + " format error"); } if (value != null && !newDppConfigs.containsKey(value)) { throw new DdlException("Load cluster[" + value + "] does not exist"); } newDefaultLoadCluster = value; } else if (keyArr[0].equalsIgnoreCase(PROP_MAX_QUERY_INSTANCES)) { if (keyArr.length != 1) { throw new DdlException(PROP_MAX_QUERY_INSTANCES + " format error"); } try { newMaxQueryInstances = Long.parseLong(value); } catch (NumberFormatException e) { throw new DdlException(PROP_MAX_QUERY_INSTANCES + " is not number"); } } else if (keyArr[0].equalsIgnoreCase(PROP_SQL_BLOCK_RULES)) { if (keyArr.length != 1) { throw new DdlException(PROP_SQL_BLOCK_RULES + " format error"); } for (String ruleName : value.replaceAll(" ", "").split(",")) { if (!ruleName.equals("") && !Env.getCurrentEnv().getSqlBlockRuleMgr().existRule(ruleName)) { throw new DdlException("the sql block rule " + ruleName + " not exist"); } } sqlBlockRules = value; } else if (keyArr[0].equalsIgnoreCase(PROP_CPU_RESOURCE_LIMIT)) { if (keyArr.length != 1) { throw new DdlException(PROP_CPU_RESOURCE_LIMIT + " format error"); } int limit = -1; try { limit = Integer.parseInt(value); } catch (NumberFormatException e) { throw new DdlException(key + " is not number"); } if (limit <= 0 && limit != -1) { throw new DdlException(key + " is not valid. 
Should not larger than 0 or equal to -1"); } cpuResourceLimit = limit; } else if (keyArr[0].equalsIgnoreCase(PROP_RESOURCE_TAGS)) { if (keyArr.length != 2) { throw new DdlException(PROP_RESOURCE_TAGS + " format error"); } if (!keyArr[1].equals(Tag.TYPE_LOCATION)) { throw new DdlException("Only support location tag now"); } if (Strings.isNullOrEmpty(value)) { resourceTags = Sets.newHashSet(); } else { try { resourceTags = parseLocationResoureTags(value); } catch (NumberFormatException e) { throw new DdlException(PROP_RESOURCE_TAGS + " parse failed: " + e.getMessage()); } } } else if (keyArr[0].equalsIgnoreCase(PROP_EXEC_MEM_LIMIT)) { execMemLimit = getLongProperty(key, value, keyArr, PROP_EXEC_MEM_LIMIT); } else if (keyArr[0].equalsIgnoreCase(PROP_USER_QUERY_TIMEOUT)) { if (keyArr.length != 1) { throw new DdlException(PROP_USER_QUERY_TIMEOUT + " format error"); } try { queryTimeout = Integer.parseInt(value); } catch (NumberFormatException e) { throw new DdlException(PROP_USER_QUERY_TIMEOUT + " is not number"); } } else if (keyArr[0].equalsIgnoreCase(PROP_USER_INSERT_TIMEOUT)) { if (keyArr.length != 1) { throw new DdlException(PROP_USER_INSERT_TIMEOUT + " format error"); } try { insertTimeout = Integer.parseInt(value); } catch (NumberFormatException e) { throw new DdlException(PROP_USER_INSERT_TIMEOUT + " is not number"); } } else if (keyArr[0].equalsIgnoreCase(PROP_WORKLOAD_GROUP)) { if (keyArr.length != 1) { throw new DdlException(PROP_WORKLOAD_GROUP + " format error"); } workloadGroup = value; } else { throw new DdlException("Unknown user property(" + key + ")"); } } this.commonProperties.setMaxConn(newMaxConn); this.commonProperties.setMaxQueryInstances(newMaxQueryInstances); this.commonProperties.setSqlBlockRules(sqlBlockRules); this.commonProperties.setCpuResourceLimit(cpuResourceLimit); this.commonProperties.setResourceTags(resourceTags); this.commonProperties.setExecMemLimit(execMemLimit); this.commonProperties.setQueryTimeout(queryTimeout); this.commonProperties.setInsertTimeout(insertTimeout); this.commonProperties.setWorkloadGroup(workloadGroup); if (newDppConfigs.containsKey(newDefaultLoadCluster)) { defaultLoadCluster = newDefaultLoadCluster; } else { defaultLoadCluster = null; } clusterToDppConfig = newDppConfigs; }
class UserProperty implements Writable { private static final String PROP_MAX_USER_CONNECTIONS = "max_user_connections"; private static final String PROP_MAX_QUERY_INSTANCES = "max_query_instances"; private static final String PROP_RESOURCE_TAGS = "resource_tags"; private static final String PROP_RESOURCE = "resource"; private static final String PROP_SQL_BLOCK_RULES = "sql_block_rules"; private static final String PROP_CPU_RESOURCE_LIMIT = "cpu_resource_limit"; private static final String PROP_EXEC_MEM_LIMIT = "exec_mem_limit"; private static final String PROP_USER_QUERY_TIMEOUT = "query_timeout"; private static final String PROP_USER_INSERT_TIMEOUT = "insert_timeout"; private static final String PROP_LOAD_CLUSTER = "load_cluster"; private static final String PROP_QUOTA = "quota"; private static final String PROP_DEFAULT_LOAD_CLUSTER = "default_load_cluster"; private static final String PROP_WORKLOAD_GROUP = "workload_group"; public static final Set<Pattern> ADVANCED_PROPERTIES = Sets.newHashSet(); public static final Set<Pattern> COMMON_PROPERTIES = Sets.newHashSet(); private String qualifiedUser; private CommonUserProperties commonProperties = new CommonUserProperties(); private String defaultLoadCluster = null; private Map<String, DppConfig> clusterToDppConfig = Maps.newHashMap(); /* * We keep white list here to save Baidu domain name (BNS) or DNS as white list. * Each frontend will periodically resolve the domain name to ip, and update the privilege table. * We never persist the resolved IPs. */ @Deprecated private WhiteList whiteList = new WhiteList(); public static final Set<Tag> INVALID_RESOURCE_TAGS; static { INVALID_RESOURCE_TAGS = Sets.newHashSet(); INVALID_RESOURCE_TAGS.add(Tag.INVALID_TAG); } static { ADVANCED_PROPERTIES.add(Pattern.compile("^" + PROP_MAX_USER_CONNECTIONS + "$", Pattern.CASE_INSENSITIVE)); ADVANCED_PROPERTIES.add(Pattern.compile("^" + PROP_RESOURCE + ".", Pattern.CASE_INSENSITIVE)); ADVANCED_PROPERTIES.add(Pattern.compile("^" + PROP_LOAD_CLUSTER + "." + DppConfig.CLUSTER_NAME_REGEX + "." + DppConfig.PRIORITY + "$", Pattern.CASE_INSENSITIVE)); ADVANCED_PROPERTIES.add(Pattern.compile("^" + PROP_MAX_QUERY_INSTANCES + "$", Pattern.CASE_INSENSITIVE)); ADVANCED_PROPERTIES.add(Pattern.compile("^" + PROP_SQL_BLOCK_RULES + "$", Pattern.CASE_INSENSITIVE)); ADVANCED_PROPERTIES.add(Pattern.compile("^" + PROP_CPU_RESOURCE_LIMIT + "$", Pattern.CASE_INSENSITIVE)); ADVANCED_PROPERTIES.add(Pattern.compile("^" + PROP_RESOURCE_TAGS + "$", Pattern.CASE_INSENSITIVE)); ADVANCED_PROPERTIES.add(Pattern.compile("^" + PROP_EXEC_MEM_LIMIT + "$", Pattern.CASE_INSENSITIVE)); ADVANCED_PROPERTIES.add(Pattern.compile("^" + PROP_USER_QUERY_TIMEOUT + "$", Pattern.CASE_INSENSITIVE)); ADVANCED_PROPERTIES.add(Pattern.compile("^" + PROP_USER_INSERT_TIMEOUT + "$", Pattern.CASE_INSENSITIVE)); COMMON_PROPERTIES.add(Pattern.compile("^" + PROP_QUOTA + ".", Pattern.CASE_INSENSITIVE)); COMMON_PROPERTIES.add(Pattern.compile("^" + PROP_DEFAULT_LOAD_CLUSTER + "$", Pattern.CASE_INSENSITIVE)); COMMON_PROPERTIES.add(Pattern.compile("^" + PROP_LOAD_CLUSTER + "." 
+ DppConfig.CLUSTER_NAME_REGEX + ".", Pattern.CASE_INSENSITIVE)); COMMON_PROPERTIES.add(Pattern.compile("^" + PROP_WORKLOAD_GROUP + "$", Pattern.CASE_INSENSITIVE)); } public UserProperty() { } public UserProperty(String qualifiedUser) { this.qualifiedUser = qualifiedUser; } public String getQualifiedUser() { return qualifiedUser; } public long getMaxConn() { return this.commonProperties.getMaxConn(); } public int getQueryTimeout() { return this.commonProperties.getQueryTimeout(); } public int getInsertTimeout() { return this.commonProperties.getInsertTimeout(); } public long getMaxQueryInstances() { return commonProperties.getMaxQueryInstances(); } public String[] getSqlBlockRules() { return commonProperties.getSqlBlockRulesSplit(); } public int getCpuResourceLimit() { return commonProperties.getCpuResourceLimit(); } public String getWorkloadGroup() { return commonProperties.getWorkloadGroup(); } @Deprecated public WhiteList getWhiteList() { return whiteList; } public Set<Tag> getCopiedResourceTags() { return Sets.newHashSet(this.commonProperties.getResourceTags()); } public long getExecMemLimit() { return commonProperties.getExecMemLimit(); } private long getLongProperty(String key, String value, String[] keyArr, String propName) throws DdlException { if (keyArr.length != 1) { throw new DdlException(propName + " format error"); } long limit = -1; try { limit = Long.parseLong(value); } catch (NumberFormatException e) { throw new DdlException(key + " is not number"); } if (limit <= 0 && limit != -1) { throw new DdlException(key + " is not valid. Should not larger than 0 or equal to -1"); } return limit; } private Set<Tag> parseLocationResoureTags(String value) throws AnalysisException { Set<Tag> tags = Sets.newHashSet(); String[] parts = value.replaceAll(" ", "").split(","); for (String part : parts) { Tag tag = Tag.create(Tag.TYPE_LOCATION, part); tags.add(tag); } return tags; } private void updateLoadCluster(String[] keyArr, String value, Map<String, DppConfig> newDppConfigs) throws DdlException { if (keyArr.length == 1 && Strings.isNullOrEmpty(value)) { newDppConfigs.clear(); } else if (keyArr.length == 2 && Strings.isNullOrEmpty(value)) { String cluster = keyArr[1]; newDppConfigs.remove(cluster); } else if (keyArr.length == 3 && Strings.isNullOrEmpty(value)) { String cluster = keyArr[1]; if (!newDppConfigs.containsKey(cluster)) { throw new DdlException("Load cluster[" + value + "] does not exist"); } try { newDppConfigs.get(cluster).resetConfigByKey(keyArr[2]); } catch (LoadException e) { throw new DdlException(e.getMessage()); } } else if (keyArr.length == 3 && value != null) { String cluster = keyArr[1]; Map<String, String> configMap = Maps.newHashMap(); configMap.put(keyArr[2], value); try { DppConfig newDppConfig = DppConfig.create(configMap); if (newDppConfigs.containsKey(cluster)) { newDppConfigs.get(cluster).update(newDppConfig, true); } else { newDppConfigs.put(cluster, newDppConfig); } } catch (LoadException e) { throw new DdlException(e.getMessage()); } } else { throw new DdlException(PROP_LOAD_CLUSTER + " format error"); } } public String getDefaultLoadCluster() { return defaultLoadCluster; } public Pair<String, DppConfig> getLoadClusterInfo(String cluster) { String tmpCluster = cluster; if (tmpCluster == null) { tmpCluster = defaultLoadCluster; } DppConfig dppConfig = null; if (tmpCluster != null) { dppConfig = clusterToDppConfig.get(tmpCluster); if (dppConfig != null) { dppConfig = dppConfig.getCopiedDppConfig(); } } return Pair.of(tmpCluster, dppConfig); } public 
List<List<String>> fetchProperty() { List<List<String>> result = Lists.newArrayList(); String dot = SetUserPropertyVar.DOT_SEPARATOR; result.add(Lists.newArrayList(PROP_MAX_USER_CONNECTIONS, String.valueOf(commonProperties.getMaxConn()))); result.add(Lists.newArrayList(PROP_MAX_QUERY_INSTANCES, String.valueOf(commonProperties.getMaxQueryInstances()))); result.add(Lists.newArrayList(PROP_SQL_BLOCK_RULES, commonProperties.getSqlBlockRules())); result.add(Lists.newArrayList(PROP_CPU_RESOURCE_LIMIT, String.valueOf(commonProperties.getCpuResourceLimit()))); result.add(Lists.newArrayList(PROP_EXEC_MEM_LIMIT, String.valueOf(commonProperties.getExecMemLimit()))); result.add(Lists.newArrayList(PROP_USER_QUERY_TIMEOUT, String.valueOf(commonProperties.getQueryTimeout()))); result.add(Lists.newArrayList(PROP_USER_INSERT_TIMEOUT, String.valueOf(commonProperties.getInsertTimeout()))); result.add(Lists.newArrayList(PROP_RESOURCE_TAGS, Joiner.on(", ").join(commonProperties.getResourceTags()))); result.add(Lists.newArrayList(PROP_WORKLOAD_GROUP, String.valueOf(commonProperties.getWorkloadGroup()))); if (defaultLoadCluster != null) { result.add(Lists.newArrayList(PROP_DEFAULT_LOAD_CLUSTER, defaultLoadCluster)); } else { result.add(Lists.newArrayList(PROP_DEFAULT_LOAD_CLUSTER, "")); } for (Map.Entry<String, DppConfig> entry : clusterToDppConfig.entrySet()) { String cluster = entry.getKey(); DppConfig dppConfig = entry.getValue(); String clusterPrefix = PROP_LOAD_CLUSTER + dot + cluster + dot; if (dppConfig.getPaloPath() != null) { result.add(Lists.newArrayList(clusterPrefix + DppConfig.getPaloPathKey(), dppConfig.getPaloPath())); } result.add(Lists.newArrayList(clusterPrefix + DppConfig.getHttpPortKey(), String.valueOf(dppConfig.getHttpPort()))); if (dppConfig.getHadoopConfigs() != null) { List<String> configs = Lists.newArrayList(); for (Map.Entry<String, String> configEntry : dppConfig.getHadoopConfigs().entrySet()) { configs.add(String.format("%s=%s", configEntry.getKey(), configEntry.getValue())); } result.add(Lists.newArrayList(clusterPrefix + DppConfig.getHadoopConfigsKey(), StringUtils.join(configs, ";"))); } result.add(Lists.newArrayList(clusterPrefix + DppConfig.getPriorityKey(), String.valueOf(dppConfig.getPriority()))); } Collections.sort(result, new Comparator<List<String>>() { @Override public int compare(List<String> o1, List<String> o2) { return o1.get(0).compareTo(o2.get(0)); } }); return result; } public static UserProperty read(DataInput in) throws IOException { UserProperty userProperty = new UserProperty(); userProperty.readFields(in); return userProperty; } @Override public void write(DataOutput out) throws IOException { Text.writeString(out, qualifiedUser); UserResource.write(out); if (defaultLoadCluster == null) { out.writeBoolean(false); } else { out.writeBoolean(true); Text.writeString(out, defaultLoadCluster); } out.writeInt(clusterToDppConfig.size()); for (Map.Entry<String, DppConfig> entry : clusterToDppConfig.entrySet()) { Text.writeString(out, entry.getKey()); entry.getValue().write(out); } commonProperties.write(out); } public void readFields(DataInput in) throws IOException { qualifiedUser = Text.readString(in); if (Env.getCurrentEnvJournalVersion() < FeMetaVersion.VERSION_100) { long maxConn = in.readLong(); this.commonProperties.setMaxConn(maxConn); } UserResource.readIn(in); if (in.readBoolean()) { defaultLoadCluster = Text.readString(in); } int clusterNum = in.readInt(); for (int i = 0; i < clusterNum; ++i) { String cluster = Text.readString(in); DppConfig dppConfig = new 
DppConfig(); dppConfig.readFields(in); clusterToDppConfig.put(cluster, dppConfig); } if (Env.getCurrentEnvJournalVersion() < FeMetaVersion.VERSION_116) { whiteList.readFields(in); } else { whiteList = new WhiteList(); } if (Env.getCurrentEnvJournalVersion() >= FeMetaVersion.VERSION_100) { this.commonProperties = CommonUserProperties.read(in); } } }
class UserProperty implements Writable { private static final String PROP_MAX_USER_CONNECTIONS = "max_user_connections"; private static final String PROP_MAX_QUERY_INSTANCES = "max_query_instances"; private static final String PROP_RESOURCE_TAGS = "resource_tags"; private static final String PROP_RESOURCE = "resource"; private static final String PROP_SQL_BLOCK_RULES = "sql_block_rules"; private static final String PROP_CPU_RESOURCE_LIMIT = "cpu_resource_limit"; private static final String PROP_EXEC_MEM_LIMIT = "exec_mem_limit"; private static final String PROP_USER_QUERY_TIMEOUT = "query_timeout"; private static final String PROP_USER_INSERT_TIMEOUT = "insert_timeout"; private static final String PROP_LOAD_CLUSTER = "load_cluster"; private static final String PROP_QUOTA = "quota"; private static final String PROP_DEFAULT_LOAD_CLUSTER = "default_load_cluster"; private static final String PROP_WORKLOAD_GROUP = "default_workload_group"; public static final Set<Pattern> ADVANCED_PROPERTIES = Sets.newHashSet(); public static final Set<Pattern> COMMON_PROPERTIES = Sets.newHashSet(); private String qualifiedUser; private CommonUserProperties commonProperties = new CommonUserProperties(); private String defaultLoadCluster = null; private Map<String, DppConfig> clusterToDppConfig = Maps.newHashMap(); /* * We keep white list here to save Baidu domain name (BNS) or DNS as white list. * Each frontend will periodically resolve the domain name to ip, and update the privilege table. * We never persist the resolved IPs. */ @Deprecated private WhiteList whiteList = new WhiteList(); public static final Set<Tag> INVALID_RESOURCE_TAGS; static { INVALID_RESOURCE_TAGS = Sets.newHashSet(); INVALID_RESOURCE_TAGS.add(Tag.INVALID_TAG); } static { ADVANCED_PROPERTIES.add(Pattern.compile("^" + PROP_MAX_USER_CONNECTIONS + "$", Pattern.CASE_INSENSITIVE)); ADVANCED_PROPERTIES.add(Pattern.compile("^" + PROP_RESOURCE + ".", Pattern.CASE_INSENSITIVE)); ADVANCED_PROPERTIES.add(Pattern.compile("^" + PROP_LOAD_CLUSTER + "." + DppConfig.CLUSTER_NAME_REGEX + "." + DppConfig.PRIORITY + "$", Pattern.CASE_INSENSITIVE)); ADVANCED_PROPERTIES.add(Pattern.compile("^" + PROP_MAX_QUERY_INSTANCES + "$", Pattern.CASE_INSENSITIVE)); ADVANCED_PROPERTIES.add(Pattern.compile("^" + PROP_SQL_BLOCK_RULES + "$", Pattern.CASE_INSENSITIVE)); ADVANCED_PROPERTIES.add(Pattern.compile("^" + PROP_CPU_RESOURCE_LIMIT + "$", Pattern.CASE_INSENSITIVE)); ADVANCED_PROPERTIES.add(Pattern.compile("^" + PROP_RESOURCE_TAGS + "$", Pattern.CASE_INSENSITIVE)); ADVANCED_PROPERTIES.add(Pattern.compile("^" + PROP_EXEC_MEM_LIMIT + "$", Pattern.CASE_INSENSITIVE)); ADVANCED_PROPERTIES.add(Pattern.compile("^" + PROP_USER_QUERY_TIMEOUT + "$", Pattern.CASE_INSENSITIVE)); ADVANCED_PROPERTIES.add(Pattern.compile("^" + PROP_USER_INSERT_TIMEOUT + "$", Pattern.CASE_INSENSITIVE)); COMMON_PROPERTIES.add(Pattern.compile("^" + PROP_QUOTA + ".", Pattern.CASE_INSENSITIVE)); COMMON_PROPERTIES.add(Pattern.compile("^" + PROP_DEFAULT_LOAD_CLUSTER + "$", Pattern.CASE_INSENSITIVE)); COMMON_PROPERTIES.add(Pattern.compile("^" + PROP_LOAD_CLUSTER + "." 
+ DppConfig.CLUSTER_NAME_REGEX + ".", Pattern.CASE_INSENSITIVE)); COMMON_PROPERTIES.add(Pattern.compile("^" + PROP_WORKLOAD_GROUP + "$", Pattern.CASE_INSENSITIVE)); } public UserProperty() { } public UserProperty(String qualifiedUser) { this.qualifiedUser = qualifiedUser; } public String getQualifiedUser() { return qualifiedUser; } public long getMaxConn() { return this.commonProperties.getMaxConn(); } public int getQueryTimeout() { return this.commonProperties.getQueryTimeout(); } public int getInsertTimeout() { return this.commonProperties.getInsertTimeout(); } public long getMaxQueryInstances() { return commonProperties.getMaxQueryInstances(); } public String[] getSqlBlockRules() { return commonProperties.getSqlBlockRulesSplit(); } public int getCpuResourceLimit() { return commonProperties.getCpuResourceLimit(); } public String getWorkloadGroup() { return commonProperties.getWorkloadGroup(); } @Deprecated public WhiteList getWhiteList() { return whiteList; } public Set<Tag> getCopiedResourceTags() { return Sets.newHashSet(this.commonProperties.getResourceTags()); } public long getExecMemLimit() { return commonProperties.getExecMemLimit(); } private long getLongProperty(String key, String value, String[] keyArr, String propName) throws DdlException { if (keyArr.length != 1) { throw new DdlException(propName + " format error"); } long limit = -1; try { limit = Long.parseLong(value); } catch (NumberFormatException e) { throw new DdlException(key + " is not number"); } if (limit <= 0 && limit != -1) { throw new DdlException(key + " is not valid. Should not larger than 0 or equal to -1"); } return limit; } private Set<Tag> parseLocationResoureTags(String value) throws AnalysisException { Set<Tag> tags = Sets.newHashSet(); String[] parts = value.replaceAll(" ", "").split(","); for (String part : parts) { Tag tag = Tag.create(Tag.TYPE_LOCATION, part); tags.add(tag); } return tags; } private void updateLoadCluster(String[] keyArr, String value, Map<String, DppConfig> newDppConfigs) throws DdlException { if (keyArr.length == 1 && Strings.isNullOrEmpty(value)) { newDppConfigs.clear(); } else if (keyArr.length == 2 && Strings.isNullOrEmpty(value)) { String cluster = keyArr[1]; newDppConfigs.remove(cluster); } else if (keyArr.length == 3 && Strings.isNullOrEmpty(value)) { String cluster = keyArr[1]; if (!newDppConfigs.containsKey(cluster)) { throw new DdlException("Load cluster[" + value + "] does not exist"); } try { newDppConfigs.get(cluster).resetConfigByKey(keyArr[2]); } catch (LoadException e) { throw new DdlException(e.getMessage()); } } else if (keyArr.length == 3 && value != null) { String cluster = keyArr[1]; Map<String, String> configMap = Maps.newHashMap(); configMap.put(keyArr[2], value); try { DppConfig newDppConfig = DppConfig.create(configMap); if (newDppConfigs.containsKey(cluster)) { newDppConfigs.get(cluster).update(newDppConfig, true); } else { newDppConfigs.put(cluster, newDppConfig); } } catch (LoadException e) { throw new DdlException(e.getMessage()); } } else { throw new DdlException(PROP_LOAD_CLUSTER + " format error"); } } public String getDefaultLoadCluster() { return defaultLoadCluster; } public Pair<String, DppConfig> getLoadClusterInfo(String cluster) { String tmpCluster = cluster; if (tmpCluster == null) { tmpCluster = defaultLoadCluster; } DppConfig dppConfig = null; if (tmpCluster != null) { dppConfig = clusterToDppConfig.get(tmpCluster); if (dppConfig != null) { dppConfig = dppConfig.getCopiedDppConfig(); } } return Pair.of(tmpCluster, dppConfig); } public 
List<List<String>> fetchProperty() { List<List<String>> result = Lists.newArrayList(); String dot = SetUserPropertyVar.DOT_SEPARATOR; result.add(Lists.newArrayList(PROP_MAX_USER_CONNECTIONS, String.valueOf(commonProperties.getMaxConn()))); result.add(Lists.newArrayList(PROP_MAX_QUERY_INSTANCES, String.valueOf(commonProperties.getMaxQueryInstances()))); result.add(Lists.newArrayList(PROP_SQL_BLOCK_RULES, commonProperties.getSqlBlockRules())); result.add(Lists.newArrayList(PROP_CPU_RESOURCE_LIMIT, String.valueOf(commonProperties.getCpuResourceLimit()))); result.add(Lists.newArrayList(PROP_EXEC_MEM_LIMIT, String.valueOf(commonProperties.getExecMemLimit()))); result.add(Lists.newArrayList(PROP_USER_QUERY_TIMEOUT, String.valueOf(commonProperties.getQueryTimeout()))); result.add(Lists.newArrayList(PROP_USER_INSERT_TIMEOUT, String.valueOf(commonProperties.getInsertTimeout()))); result.add(Lists.newArrayList(PROP_RESOURCE_TAGS, Joiner.on(", ").join(commonProperties.getResourceTags()))); result.add(Lists.newArrayList(PROP_WORKLOAD_GROUP, String.valueOf(commonProperties.getWorkloadGroup()))); if (defaultLoadCluster != null) { result.add(Lists.newArrayList(PROP_DEFAULT_LOAD_CLUSTER, defaultLoadCluster)); } else { result.add(Lists.newArrayList(PROP_DEFAULT_LOAD_CLUSTER, "")); } for (Map.Entry<String, DppConfig> entry : clusterToDppConfig.entrySet()) { String cluster = entry.getKey(); DppConfig dppConfig = entry.getValue(); String clusterPrefix = PROP_LOAD_CLUSTER + dot + cluster + dot; if (dppConfig.getPaloPath() != null) { result.add(Lists.newArrayList(clusterPrefix + DppConfig.getPaloPathKey(), dppConfig.getPaloPath())); } result.add(Lists.newArrayList(clusterPrefix + DppConfig.getHttpPortKey(), String.valueOf(dppConfig.getHttpPort()))); if (dppConfig.getHadoopConfigs() != null) { List<String> configs = Lists.newArrayList(); for (Map.Entry<String, String> configEntry : dppConfig.getHadoopConfigs().entrySet()) { configs.add(String.format("%s=%s", configEntry.getKey(), configEntry.getValue())); } result.add(Lists.newArrayList(clusterPrefix + DppConfig.getHadoopConfigsKey(), StringUtils.join(configs, ";"))); } result.add(Lists.newArrayList(clusterPrefix + DppConfig.getPriorityKey(), String.valueOf(dppConfig.getPriority()))); } Collections.sort(result, new Comparator<List<String>>() { @Override public int compare(List<String> o1, List<String> o2) { return o1.get(0).compareTo(o2.get(0)); } }); return result; } public static UserProperty read(DataInput in) throws IOException { UserProperty userProperty = new UserProperty(); userProperty.readFields(in); return userProperty; } @Override public void write(DataOutput out) throws IOException { Text.writeString(out, qualifiedUser); UserResource.write(out); if (defaultLoadCluster == null) { out.writeBoolean(false); } else { out.writeBoolean(true); Text.writeString(out, defaultLoadCluster); } out.writeInt(clusterToDppConfig.size()); for (Map.Entry<String, DppConfig> entry : clusterToDppConfig.entrySet()) { Text.writeString(out, entry.getKey()); entry.getValue().write(out); } commonProperties.write(out); } public void readFields(DataInput in) throws IOException { qualifiedUser = Text.readString(in); if (Env.getCurrentEnvJournalVersion() < FeMetaVersion.VERSION_100) { long maxConn = in.readLong(); this.commonProperties.setMaxConn(maxConn); } UserResource.readIn(in); if (in.readBoolean()) { defaultLoadCluster = Text.readString(in); } int clusterNum = in.readInt(); for (int i = 0; i < clusterNum; ++i) { String cluster = Text.readString(in); DppConfig dppConfig = new 
DppConfig(); dppConfig.readFields(in); clusterToDppConfig.put(cluster, dppConfig); } if (Env.getCurrentEnvJournalVersion() < FeMetaVersion.VERSION_116) { whiteList.readFields(in); } else { whiteList = new WhiteList(); } if (Env.getCurrentEnvJournalVersion() >= FeMetaVersion.VERSION_100) { this.commonProperties = CommonUserProperties.read(in); } } }
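A brief illustrative note on the record above: the fix renames the user property key from `workload_group` to `default_workload_group` (compare `PROP_WORKLOAD_GROUP` in the before/after contexts). A minimal sketch of the observable effect, assuming the Doris classes shown above are on the classpath and an enclosing method that declares `throws UserException`; the qualified user name and workload group name are illustrative, not from the source:

// Hypothetical driver exercising UserProperty.update() after the rename.
UserProperty prop = new UserProperty("default_cluster:alice"); // illustrative qualified user
prop.update(Lists.newArrayList(Pair.of("default_workload_group", "g1"))); // new key: accepted
try {
    prop.update(Lists.newArrayList(Pair.of("workload_group", "g1"))); // old key no longer matches
} catch (DdlException e) {
    System.out.println(e.getMessage()); // "Unknown user property(workload_group)"
}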
That's a sanity check I copied over from the old `ClassPathPackagedProgramRetrieverTest`. I thought it was reasonable to check that the jar loading works without modifying the `java.class.path` system property. Isn't that essentially the same as what you suggest? ...just with less code?
public void testJarFromSystemClasspath() throws MalformedURLException { classpathProvider.setSystemClasspathWithTwoEntryClasses(); final Collection<String> systemClasspath = StreamSupport.stream( FromClasspathEntryClassInformationProvider.extractSystemClasspath() .spliterator(), false) .map(File::getName) .collect(Collectors.toList()); final Collection<String> expectedContent = StreamSupport.stream( classpathProvider .getURLUserClasspathWithTwoEntryClasses() .spliterator(), false) .map(URL::getPath) .map(FilenameUtils::getName) .filter(name -> name.endsWith("jar")) .collect(Collectors.toList()); assertThat( systemClasspath, IsIterableContainingInAnyOrder.containsInAnyOrder(expectedContent.toArray())); }
false)
public void testJarFromSystemClasspath() throws MalformedURLException { multipleEntryClassesClasspathProvider.setSystemClasspath(); final Collection<String> systemClasspath = StreamSupport.stream( FromClasspathEntryClassInformationProvider.extractSystemClasspath() .spliterator(), false) .map(File::getName) .collect(Collectors.toList()); final Collection<String> expectedContent = StreamSupport.stream( multipleEntryClassesClasspathProvider .getURLUserClasspath() .spliterator(), false) .map(URL::getPath) .map(FilenameUtils::getName) .filter(name -> name.endsWith("jar")) .collect(Collectors.toList()); assertThat( systemClasspath, IsIterableContainingInAnyOrder.containsInAnyOrder(expectedContent.toArray())); }
class FromClasspathEntryClassInformationProviderTest extends TestLogger { @Rule public final ClasspathProvider classpathProvider = new ClasspathProvider(); @Test public void testJobClassOnUserClasspathWithExplicitJobClassName() throws MalformedURLException, FlinkException { FromClasspathEntryClassInformationProvider testInstance = FromClasspathEntryClassInformationProvider.create( classpathProvider.getJobClassName(), classpathProvider.getURLUserClasspathWithEntryClass()); assertThat(testInstance.getJobClassName().isPresent(), is(true)); assertThat(testInstance.getJobClassName().get(), is(classpathProvider.getJobClassName())); assertThat(testInstance.getJarFile().isPresent(), is(false)); } @Test(expected = FlinkException.class) public void testJobClassOnUserClasspathWithOnlyTestFileOnClasspath() throws MalformedURLException, FlinkException { FromClasspathEntryClassInformationProvider.create( "SomeJobClassName", classpathProvider.getURLUserClasspathWithOnlyTextFile()); } @Test(expected = NullPointerException.class) public void testJobClassOnUserClasspathWithMissingJobClassName() throws MalformedURLException, FlinkException { FromClasspathEntryClassInformationProvider.create( null, classpathProvider.getURLUserClasspathWithEntryClass()); } @Test(expected = NullPointerException.class) public void testJobClassOnUserClasspathWithMissingUserClasspath() throws FlinkException { FromClasspathEntryClassInformationProvider.create("jobClassName", null); } @Test public void testJobClassOnUserClasspathWithoutExplicitJobClassName() throws IOException { FromClasspathEntryClassInformationProvider testInstance = FromClasspathEntryClassInformationProvider.createFromClasspath( classpathProvider.getURLUserClasspathWithEntryClass()); assertThat(testInstance.getJobClassName().isPresent(), is(true)); assertThat(testInstance.getJobClassName().get(), is(classpathProvider.getJobClassName())); assertThat(testInstance.getJarFile().isPresent(), is(false)); } @Test(expected = NoSuchElementException.class) public void testMissingJobClassOnUserClasspathWithoutExplicitJobClassName() throws IOException { FromClasspathEntryClassInformationProvider.createFromClasspath( classpathProvider.getURLUserClasspathWithoutEntryClass()); } @Test(expected = IllegalArgumentException.class) public void testTooManyMainMethodsOnUserClasspath() throws IOException { FromClasspathEntryClassInformationProvider.createFromClasspath( classpathProvider.getURLUserClasspathWithTwoEntryClasses()); } @Test(expected = NullPointerException.class) public void testJobClassOnUserClasspathWithoutExplicitJobClassNameAndMissingUserClasspath() throws IOException { FromClasspathEntryClassInformationProvider.createFromClasspath(null); } @Test public void testJobClassNameFromSystemClasspath() throws IOException { classpathProvider.setSystemClasspathWithEntryClass(); FromClasspathEntryClassInformationProvider testInstance = FromClasspathEntryClassInformationProvider.createFromSystemClasspath(); assertThat(testInstance.getJobClassName().isPresent(), is(true)); assertThat(testInstance.getJobClassName().get(), is(classpathProvider.getJobClassName())); assertThat(testInstance.getJarFile().isPresent(), is(false)); } @Test(expected = NoSuchElementException.class) public void testMissingJobClassNameFromSystemClasspath() throws IOException { classpathProvider.setSystemClasspathWithoutEntryClass(); FromClasspathEntryClassInformationProvider.createFromSystemClasspath(); } @Test(expected = IllegalArgumentException.class) public void testTooManyMainMethodsOnSystemClasspath() 
throws IOException { classpathProvider.setSystemClasspathWithTwoEntryClasses(); FromClasspathEntryClassInformationProvider.createFromSystemClasspath(); } @Test public void testJarFromSystemClasspathSanityCheck() { final Iterable<File> systemClasspath = FromClasspathEntryClassInformationProvider.extractSystemClasspath(); assertThat( StreamSupport.stream(systemClasspath.spliterator(), false) .map(File::getName) .collect(Collectors.toList()), IsCollectionContaining.hasItem(CoreMatchers.containsString("junit"))); } }
class FromClasspathEntryClassInformationProviderTest extends TestLogger { @Rule public ClasspathProvider noEntryClassClasspathProvider = ClasspathProvider.createWithNoEntryClass(); @Rule public ClasspathProvider singleEntryClassClasspathProvider = ClasspathProvider.createWithSingleEntryClass(); @Rule public ClasspathProvider multipleEntryClassesClasspathProvider = ClasspathProvider.createWithMultipleEntryClasses(); @Rule public ClasspathProvider testJobEntryClassClasspathProvider = ClasspathProvider.createWithTestJobOnly(); @Rule public ClasspathProvider onlyTextFileClasspathProvider = ClasspathProvider.createWithTextFileOnly(); @Test public void testJobClassOnUserClasspathWithExplicitJobClassName() throws IOException, FlinkException { FromClasspathEntryClassInformationProvider testInstance = FromClasspathEntryClassInformationProvider.create( singleEntryClassClasspathProvider.getJobClassName(), singleEntryClassClasspathProvider.getURLUserClasspath()); assertThat(testInstance.getJobClassName().isPresent(), is(true)); assertThat( testInstance.getJobClassName().get(), is(singleEntryClassClasspathProvider.getJobClassName())); assertThat(testInstance.getJarFile().isPresent(), is(false)); } @Test(expected = FlinkException.class) public void testJobClassOnUserClasspathWithOnlyTestFileOnClasspath() throws IOException, FlinkException { FromClasspathEntryClassInformationProvider.create( "SomeJobClassName", onlyTextFileClasspathProvider.getURLUserClasspath()); } @Test(expected = NullPointerException.class) public void testJobClassOnUserClasspathWithMissingJobClassName() throws IOException, FlinkException { FromClasspathEntryClassInformationProvider.create( null, singleEntryClassClasspathProvider.getURLUserClasspath()); } @Test(expected = NullPointerException.class) public void testJobClassOnUserClasspathWithMissingUserClasspath() throws IOException, FlinkException { FromClasspathEntryClassInformationProvider.create("jobClassName", null); } @Test public void testJobClassOnUserClasspathWithoutExplicitJobClassName() throws IOException, FlinkException { FromClasspathEntryClassInformationProvider testInstance = FromClasspathEntryClassInformationProvider.createFromClasspath( singleEntryClassClasspathProvider.getURLUserClasspath()); assertThat(testInstance.getJobClassName().isPresent(), is(true)); assertThat( testInstance.getJobClassName().get(), is(singleEntryClassClasspathProvider.getJobClassName())); assertThat(testInstance.getJarFile().isPresent(), is(false)); } @Test(expected = FlinkException.class) public void testMissingJobClassOnUserClasspathWithoutExplicitJobClassName() throws IOException, FlinkException { FromClasspathEntryClassInformationProvider.createFromClasspath( noEntryClassClasspathProvider.getURLUserClasspath()); } @Test(expected = FlinkException.class) public void testTooManyMainMethodsOnUserClasspath() throws IOException, FlinkException { FromClasspathEntryClassInformationProvider.createFromClasspath( multipleEntryClassesClasspathProvider.getURLUserClasspath()); } @Test(expected = NullPointerException.class) public void testJobClassOnUserClasspathWithoutExplicitJobClassNameAndMissingUserClasspath() throws IOException, FlinkException { FromClasspathEntryClassInformationProvider.createFromClasspath(null); } @Test public void testJobClassNameFromSystemClasspath() throws IOException, FlinkException { singleEntryClassClasspathProvider.setSystemClasspath(); FromClasspathEntryClassInformationProvider testInstance = FromClasspathEntryClassInformationProvider.createFromSystemClasspath(); 
assertThat(testInstance.getJobClassName().isPresent(), is(true)); assertThat( testInstance.getJobClassName().get(), is(singleEntryClassClasspathProvider.getJobClassName())); assertThat(testInstance.getJarFile().isPresent(), is(false)); } @Test(expected = FlinkException.class) public void testMissingJobClassNameFromSystemClasspath() throws IOException, FlinkException { noEntryClassClasspathProvider.setSystemClasspath(); FromClasspathEntryClassInformationProvider.createFromSystemClasspath(); } @Test(expected = FlinkException.class) public void testTooManyMainMethodsOnSystemClasspath() throws IOException, FlinkException { multipleEntryClassesClasspathProvider.setSystemClasspath(); FromClasspathEntryClassInformationProvider.createFromSystemClasspath(); } @Test public void testJarFromSystemClasspathSanityCheck() { final Iterable<File> systemClasspath = FromClasspathEntryClassInformationProvider.extractSystemClasspath(); assertThat( StreamSupport.stream(systemClasspath.spliterator(), false) .map(File::getName) .collect(Collectors.toList()), IsCollectionContaining.hasItem(CoreMatchers.containsString("junit"))); } @Test }
The space after the reason is not applicable if the details record is empty.
public String stringValue() { return "error " + reason + " " + Optional.ofNullable(details).map(Object::toString).orElse(""); }
return "error " + reason + " " + Optional.ofNullable(details).map(Object::toString).orElse("");
public String stringValue() { return "error " + reason + Optional.ofNullable(details).map(details -> " " + details).orElse(""); }
class ErrorValue extends RuntimeException implements RefValue { private static final long serialVersionUID = 1L; private final BType type; private final String reason; private final Object details; public ErrorValue(String reason, Object details) { super(reason); this.type = new BErrorType(TypeConstants.ERROR, BTypes.typeError.getPackage(), BTypes.typeString, TypeChecker.getType(details)); this.reason = reason; this.details = details; } public ErrorValue(BType type, String reason, Object details) { super(reason); this.type = type; this.reason = reason; this.details = details; } @Override @Override public BType getType() { return type; } @Override public void stamp(BType type, List<TypeValuePair> unresolvedValues) { } @Override public Object copy(Map<Object, Object> refs) { return this; } @Override public void attemptFreeze(Status freezeStatus) { } @Override public String toString() { return stringValue(); } public String getReason() { return reason; } public Object getDetails() { if (details instanceof RefValue) { return ((RefValue) details).copy(new HashMap<>()); } return details; } @Override public void printStackTrace() { ErrorHandlerUtils.printError(ERROR_PRINT_PREFIX + getPrintableStackTrace()); } public void printStackTrace(PrintWriter printWriter) { printWriter.print(ERROR_PRINT_PREFIX + getPrintableStackTrace()); } @Override public StackTraceElement[] getStackTrace() { StackTraceElement[] stackTrace = super.getStackTrace(); List<StackTraceElement> filteredStack = new LinkedList<>(); for (int i = 0; i < stackTrace.length; i++) { StackTraceElement stackTraceElement = BallerinaErrors.filterStackTraceElement(stackTrace, i); if (stackTraceElement != null) { filteredStack.add(stackTraceElement); } } StackTraceElement[] filteredStackArray = new StackTraceElement[filteredStack.size()]; return filteredStack.toArray(filteredStackArray); } public String getPrintableStackTrace() { String errorMsg = getErrorMessage(); StringBuilder sb = new StringBuilder(); sb.append(errorMsg); StackTraceElement[] stackTrace = this.getStackTrace(); if (stackTrace.length == 0) { return sb.toString(); } sb.append("\n\tat "); printStackElement(sb, stackTrace[0], ""); for (int i = 1; i < stackTrace.length; i++) { printStackElement(sb, stackTrace[i], "\n\t "); } return sb.toString(); } private void printStackElement(StringBuilder sb, StackTraceElement stackTraceElement, String tab) { sb.append(tab).append(stackTraceElement.getMethodName()); sb.append("(").append(stackTraceElement.getFileName()); sb.append(":").append(stackTraceElement.getLineNumber()).append(")"); } private String getErrorMessage() { String errorMsg = ""; boolean reasonAdded = false; if (reason != null && !reason.isEmpty()) { errorMsg = reason; reasonAdded = true; } if (details != null) { errorMsg = errorMsg + (reasonAdded ? " " : "") + details.toString(); } return errorMsg; } /** * {@inheritDoc} */ @Override public boolean isFrozen() { return true; } }
class ErrorValue extends RuntimeException implements RefValue { private static final long serialVersionUID = 1L; private final BType type; private final String reason; private final Object details; public ErrorValue(String reason, Object details) { super(reason); this.type = new BErrorType(TypeConstants.ERROR, BTypes.typeError.getPackage(), BTypes.typeString, TypeChecker.getType(details)); this.reason = reason; this.details = details; } public ErrorValue(BType type, String reason, Object details) { super(reason); this.type = type; this.reason = reason; this.details = details; } @Override @Override public BType getType() { return type; } @Override public void stamp(BType type, List<TypeValuePair> unresolvedValues) { } @Override public Object copy(Map<Object, Object> refs) { return this; } @Override public void attemptFreeze(Status freezeStatus) { } @Override public String toString() { return stringValue(); } public String getReason() { return reason; } public Object getDetails() { if (details instanceof RefValue) { return ((RefValue) details).copy(new HashMap<>()); } return details; } @Override public void printStackTrace() { ErrorHandlerUtils.printError(ERROR_PRINT_PREFIX + getPrintableStackTrace()); } public void printStackTrace(PrintWriter printWriter) { printWriter.print(ERROR_PRINT_PREFIX + getPrintableStackTrace()); } @Override public StackTraceElement[] getStackTrace() { StackTraceElement[] stackTrace = super.getStackTrace(); List<StackTraceElement> filteredStack = new LinkedList<>(); for (int i = 0; i < stackTrace.length; i++) { StackTraceElement stackTraceElement = BallerinaErrors.filterStackTraceElement(stackTrace, i); if (stackTraceElement != null) { filteredStack.add(stackTraceElement); } } StackTraceElement[] filteredStackArray = new StackTraceElement[filteredStack.size()]; return filteredStack.toArray(filteredStackArray); } public String getPrintableStackTrace() { String errorMsg = getErrorMessage(); StringBuilder sb = new StringBuilder(); sb.append(errorMsg); StackTraceElement[] stackTrace = this.getStackTrace(); if (stackTrace.length == 0) { return sb.toString(); } sb.append("\n\tat "); printStackElement(sb, stackTrace[0], ""); for (int i = 1; i < stackTrace.length; i++) { printStackElement(sb, stackTrace[i], "\n\t "); } return sb.toString(); } private void printStackElement(StringBuilder sb, StackTraceElement stackTraceElement, String tab) { sb.append(tab).append(stackTraceElement.getMethodName()); sb.append("(").append(stackTraceElement.getFileName()); sb.append(":").append(stackTraceElement.getLineNumber()).append(")"); } private String getErrorMessage() { String errorMsg = ""; boolean reasonAdded = false; if (reason != null && !reason.isEmpty()) { errorMsg = reason; reasonAdded = true; } if (details != null) { errorMsg = errorMsg + (reasonAdded ? " " : "") + details.toString(); } return errorMsg; } /** * {@inheritDoc} */ @Override public boolean isFrozen() { return true; } }
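A minimal, standalone sketch of the fix in this row: adding the `" "` prefix inside the `Optional` chain means the space is only emitted when details are present, so no trailing space is left behind. The `render` helper below is hypothetical and only mirrors the fixed `stringValue()` logic.

```java
import java.util.Optional;

public class ErrorStringDemo {
    // Hypothetical helper mirroring the fixed stringValue() logic.
    static String render(String reason, Object details) {
        // The " " prefix is only added when details is non-null, so no trailing space.
        return "error " + reason + Optional.ofNullable(details).map(d -> " " + d).orElse("");
    }

    public static void main(String[] args) {
        System.out.println("[" + render("oops", null) + "]");       // [error oops]
        System.out.println("[" + render("oops", "{code:1}") + "]"); // [error oops {code:1}]
    }
}
```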
The same comment applies here about moving this parsing logic inside `SavepointFormatType`.
public SavepointOptions(CommandLine line) { super(line); args = line.getArgs(); dispose = line.hasOption(SAVEPOINT_DISPOSE_OPTION.getOpt()); disposeSavepointPath = line.getOptionValue(SAVEPOINT_DISPOSE_OPTION.getOpt()); jarFile = line.getOptionValue(JAR_OPTION.getOpt()); if (line.hasOption(SAVEPOINT_FORMAT_OPTION)) { formatType = ConfigurationUtils.convertValue( line.getOptionValue(SAVEPOINT_FORMAT_OPTION), SavepointFormatType.class); } else { formatType = SavepointFormatType.DEFAULT; } }
formatType = SavepointFormatType.DEFAULT;
public SavepointOptions(CommandLine line) { super(line); args = line.getArgs(); dispose = line.hasOption(SAVEPOINT_DISPOSE_OPTION.getOpt()); disposeSavepointPath = line.getOptionValue(SAVEPOINT_DISPOSE_OPTION.getOpt()); jarFile = line.getOptionValue(JAR_OPTION.getOpt()); if (line.hasOption(SAVEPOINT_FORMAT_OPTION)) { formatType = ConfigurationUtils.convertValue( line.getOptionValue(SAVEPOINT_FORMAT_OPTION), SavepointFormatType.class); } else { formatType = SavepointFormatType.DEFAULT; } }
class SavepointOptions extends CommandLineOptions { private final String[] args; private final SavepointFormatType formatType; private boolean dispose; private String disposeSavepointPath; private String jarFile; public String[] getArgs() { return args == null ? new String[0] : args; } public boolean isDispose() { return dispose; } public String getSavepointPath() { return disposeSavepointPath; } public String getJarFilePath() { return jarFile; } public SavepointFormatType getFormatType() { return formatType; } }
class SavepointOptions extends CommandLineOptions { private final String[] args; private final SavepointFormatType formatType; private boolean dispose; private String disposeSavepointPath; private String jarFile; public String[] getArgs() { return args == null ? new String[0] : args; } public boolean isDispose() { return dispose; } public String getSavepointPath() { return disposeSavepointPath; } public String getJarFilePath() { return jarFile; } public SavepointFormatType getFormatType() { return formatType; } }
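A hedged sketch of what the reviewer is suggesting: let the enum own the parsing and defaulting, so every caller resolves the format type the same way. `SavepointFormatType` below is a stand-in; the real Flink type and its constants may differ.

```java
import java.util.Optional;

// Stand-in for the real enum; the actual Flink type may differ.
enum SavepointFormatType {
    CANONICAL, NATIVE;

    static final SavepointFormatType DEFAULT = CANONICAL;

    // Centralized parsing with defaulting, instead of repeating it at each call site.
    // Throws IllegalArgumentException for unrecognized values.
    static SavepointFormatType fromString(Optional<String> raw) {
        return raw.map(s -> valueOf(s.toUpperCase())).orElse(DEFAULT);
    }

    public static void main(String[] args) {
        System.out.println(fromString(Optional.of("native"))); // NATIVE
        System.out.println(fromString(Optional.empty()));      // CANONICAL
    }
}
```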
The testSuiteMap contains a `TestSuite` entry for each module. If the map is empty, the TestRunner shouldn't be running at all, since no TestSuites were initialized at compile time. Even a TestSuite with no tests still counts as an entry in the map. So an empty map while the TestRunner is running indicates a problem, which is why the exit status is 1 in this case.
public static void main(String[] args) throws IOException { int exitStatus = 0; int result; Path testCache = Paths.get(args[0]); String target = args[1]; boolean report = Boolean.valueOf(args[2]); boolean coverage = Boolean.valueOf(args[3]); if (report || coverage) { testReport = new TestReport(); } out.println(); out.print("Running Tests"); if (coverage) { out.print(" with Coverage"); } out.println(); Path testSuiteCachePath = testCache.resolve(TesterinaConstants.TESTERINA_TEST_SUITE); try (BufferedReader br = Files.newBufferedReader(testSuiteCachePath, StandardCharsets.UTF_8)) { Gson gson = new Gson(); Map<String, TestSuite> testSuiteMap = gson.fromJson(br, new TypeToken<Map<String, TestSuite>>() { }.getType()); if (!testSuiteMap.isEmpty()) { for (Map.Entry<String, TestSuite> entry : testSuiteMap.entrySet()) { String moduleName = entry.getKey(); TestSuite testSuite = entry.getValue(); List<String> configList = new ArrayList<>(); configList.add(target); configList.add(testSuite.getOrgName()); configList.add(testSuite.getPackageName()); configList.add("\"" + moduleName + "\""); configList.addAll(Lists.of(Arrays.copyOfRange(args, 4, args.length))); String[] configArgs = configList.toArray(new String[0]); LaunchUtils.initConfigurations(configArgs); out.println("\n\t" + (moduleName.equals(testSuite.getPackageName()) ? moduleName : testSuite.getPackageName() + TesterinaConstants.DOT + moduleName)); testSuite.setModuleName(moduleName); List<String> testExecutionDependencies = testSuite.getTestExecutionDependencies(); classLoader = createClassLoader(testExecutionDependencies); Path jsonTmpSummaryPath = testCache.resolve(moduleName).resolve(TesterinaConstants.STATUS_FILE); result = startTestSuit(Paths.get(testSuite.getSourceRootPath()), testSuite, jsonTmpSummaryPath, classLoader); exitStatus = (result == 1) ? result : exitStatus; } } else { exitStatus = 1; } } Runtime.getRuntime().exit(exitStatus); }
exitStatus = 1;
public static void main(String[] args) throws IOException { int exitStatus = 0; int result; if (args.length >= 4) { Path testCache = Paths.get(args[0]); String target = args[1]; boolean report = Boolean.valueOf(args[2]); boolean coverage = Boolean.valueOf(args[3]); if (report || coverage) { testReport = new TestReport(); } out.println(); out.print("Running Tests"); if (coverage) { out.print(" with Coverage"); } out.println(); Path testSuiteCachePath = testCache.resolve(TesterinaConstants.TESTERINA_TEST_SUITE); try (BufferedReader br = Files.newBufferedReader(testSuiteCachePath, StandardCharsets.UTF_8)) { Gson gson = new Gson(); Map<String, TestSuite> testSuiteMap = gson.fromJson(br, new TypeToken<Map<String, TestSuite>>() { }.getType()); if (!testSuiteMap.isEmpty()) { for (Map.Entry<String, TestSuite> entry : testSuiteMap.entrySet()) { String moduleName = entry.getKey(); TestSuite testSuite = entry.getValue(); List<String> configList = new ArrayList<>(); configList.add(target); configList.add(testSuite.getOrgName()); configList.add(testSuite.getPackageName()); configList.add("\"" + moduleName + "\""); configList.addAll(Lists.of(Arrays.copyOfRange(args, 4, args.length))); String[] configArgs = configList.toArray(new String[0]); LaunchUtils.initConfigurations(configArgs); out.println("\n\t" + (moduleName.equals(testSuite.getPackageName()) ? moduleName : testSuite.getPackageName() + TesterinaConstants.DOT + moduleName)); testSuite.setModuleName(moduleName); List<String> testExecutionDependencies = testSuite.getTestExecutionDependencies(); classLoader = createClassLoader(testExecutionDependencies); Path jsonTmpSummaryPath = testCache.resolve(moduleName).resolve(TesterinaConstants.STATUS_FILE); result = startTestSuit(Paths.get(testSuite.getSourceRootPath()), testSuite, jsonTmpSummaryPath, classLoader); exitStatus = (result == 1) ? result : exitStatus; } } else { exitStatus = 1; } } } else { exitStatus = 1; } Runtime.getRuntime().exit(exitStatus); }
class Main { private static final PrintStream out = System.out; static TestReport testReport; static ClassLoader classLoader; private static int startTestSuit(Path sourceRootPath, TestSuite testSuite, Path jsonTmpSummaryPath, ClassLoader classLoader) throws IOException { int exitStatus = 0; try { TesterinaUtils.executeTests(sourceRootPath, testSuite, classLoader); } catch (RuntimeException e) { exitStatus = 1; } finally { if (testSuite.isReportRequired()) { writeStatusToJsonFile(ModuleStatus.getInstance(), jsonTmpSummaryPath); ModuleStatus.clearInstance(); } return exitStatus; } } private static void writeStatusToJsonFile(ModuleStatus moduleStatus, Path tmpJsonPath) throws IOException { File jsonFile = new File(tmpJsonPath.toString()); if (!Files.exists(tmpJsonPath.getParent())) { Files.createDirectories(tmpJsonPath.getParent()); } try (Writer writer = new OutputStreamWriter(new FileOutputStream(jsonFile), StandardCharsets.UTF_8)) { Gson gson = new Gson(); String json = gson.toJson(moduleStatus); writer.write(new String(json.getBytes(StandardCharsets.UTF_8), StandardCharsets.UTF_8)); } } public static URLClassLoader createClassLoader(List<String> jarFilePaths) { List<URL> urlList = new ArrayList<>(); for (String jarFilePath : jarFilePaths) { try { urlList.add(Paths.get(jarFilePath).toUri().toURL()); } catch (MalformedURLException e) { throw new RuntimeException("Failed to create classloader with all jar files", e); } } return AccessController.doPrivileged( (PrivilegedAction<URLClassLoader>) () -> new URLClassLoader(urlList.toArray(new URL[0]), ClassLoader.getSystemClassLoader())); } public static ClassLoader getClassLoader() { return classLoader; } }
class Main { private static final PrintStream out = System.out; static TestReport testReport; static ClassLoader classLoader; private static int startTestSuit(Path sourceRootPath, TestSuite testSuite, Path jsonTmpSummaryPath, ClassLoader classLoader) throws IOException { int exitStatus = 0; try { TesterinaUtils.executeTests(sourceRootPath, testSuite, classLoader); } catch (RuntimeException e) { exitStatus = 1; } finally { if (testSuite.isReportRequired()) { writeStatusToJsonFile(ModuleStatus.getInstance(), jsonTmpSummaryPath); ModuleStatus.clearInstance(); } return exitStatus; } } private static void writeStatusToJsonFile(ModuleStatus moduleStatus, Path tmpJsonPath) throws IOException { File jsonFile = new File(tmpJsonPath.toString()); if (!Files.exists(tmpJsonPath.getParent())) { Files.createDirectories(tmpJsonPath.getParent()); } try (Writer writer = new OutputStreamWriter(new FileOutputStream(jsonFile), StandardCharsets.UTF_8)) { Gson gson = new Gson(); String json = gson.toJson(moduleStatus); writer.write(new String(json.getBytes(StandardCharsets.UTF_8), StandardCharsets.UTF_8)); } } public static URLClassLoader createClassLoader(List<String> jarFilePaths) { List<URL> urlList = new ArrayList<>(); for (String jarFilePath : jarFilePaths) { try { urlList.add(Paths.get(jarFilePath).toUri().toURL()); } catch (MalformedURLException e) { throw new RuntimeException("Failed to create classloader with all jar files", e); } } return AccessController.doPrivileged( (PrivilegedAction<URLClassLoader>) () -> new URLClassLoader(urlList.toArray(new URL[0]), ClassLoader.getSystemClassLoader())); } public static ClassLoader getClassLoader() { return classLoader; } }
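For illustration, the fail-fast argument guard the revised `main` adds, in isolation. This is a sketch only; the usage string and argument names are assumptions, not the runner's actual CLI contract.

```java
public class ArgsGuardDemo {
    // Fail fast when the launcher passes fewer arguments than the runner requires.
    public static void main(String[] args) {
        if (args.length < 4) {
            System.err.println("usage: <testCache> <target> <report> <coverage> [configArgs...]");
            Runtime.getRuntime().exit(1);
            return;
        }
        boolean report = Boolean.parseBoolean(args[2]);
        boolean coverage = Boolean.parseBoolean(args[3]);
        System.out.println("report=" + report + ", coverage=" + coverage);
    }
}
```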
Consider something like ```suggestion return find(tenant, zoneBuckets).map(ArchiveBucket::bucketArn).orElseGet(() -> assignToBucket(zoneId, tenant)); ``` Even better if `find()` does the mapping, since both callers just want the bucket ARN anyway...
private String findOrAssignBucket(ZoneId zoneId, TenantName tenant) { var zoneBuckets = curatorDb.readArchiveBuckets(zoneId); if (find(tenant, zoneBuckets).isPresent()) return find(tenant, zoneBuckets).get().bucketArn(); else return assignToBucket(zoneId, tenant); }
else return assignToBucket(zoneId, tenant);
private String findOrAssignBucket(ZoneId zoneId, TenantName tenant) { var zoneBuckets = curatorDb.readArchiveBuckets(zoneId); return find(tenant, zoneBuckets).orElseGet(() -> assignToBucket(zoneId, tenant)); }
class CuratorArchiveBucketDb extends AbstractComponent implements ArchiveBucketDb { /** * Due to policy limits, we can't put data for more than this many tenants in a bucket. * Policy size limit is 20kb, with approx. 500 bytes of policy required per tenant = 40 tenants. * We set the maximum a bit lower to have a solid margin of error. */ private final static int TENANTS_PER_BUCKET = 30; private final ArchiveService archiveService; private final CuratorDb curatorDb; private final StringFlag bucketNameFlag; @Inject public CuratorArchiveBucketDb(Controller controller) { this.archiveService = controller.serviceRegistry().archiveService(); this.curatorDb = controller.curator(); this.bucketNameFlag = Flags.SYNC_HOST_LOGS_TO_S3_BUCKET.bindTo(controller.flagSource()); } @Override public Optional<URI> archiveUriFor(ZoneId zoneId, TenantName tenant) { String bucketArn = bucketNameFlag .with(FetchVector.Dimension.ZONE_ID, zoneId.value()) .with(FetchVector.Dimension.TENANT_ID, tenant.value()) .value(); if (bucketArn.isBlank()) return Optional.empty(); if ("auto".equals(bucketArn)) bucketArn = findOrAssignBucket(zoneId, tenant); return Optional.of(URI.create(String.format("s3: } private String assignToBucket(ZoneId zoneId, TenantName tenant) { try (var lock = curatorDb.lockArchiveBuckets(zoneId)) { Set<ArchiveBucket> zoneBuckets = new HashSet<>(curatorDb.readArchiveBuckets(zoneId)); if (find(tenant, zoneBuckets).isPresent()) return find(tenant, zoneBuckets).get().bucketArn(); Optional<ArchiveBucket> unfilledBucket = zoneBuckets.stream() .filter(bucket -> bucket.tenants().size() < TENANTS_PER_BUCKET) .findAny(); if (unfilledBucket.isPresent()) { var unfilled = unfilledBucket.get(); var tenants = new HashSet<>(unfilled.tenants()); tenants.add(tenant); var updatedBucket = new ArchiveBucket(unfilled.bucketArn(), unfilled.keyArn(), tenants); zoneBuckets.remove(unfilled); zoneBuckets.add(updatedBucket); curatorDb.writeArchiveBuckets(zoneId, zoneBuckets); return updatedBucket.bucketArn(); } var newBucket = archiveService.createArchiveBucketFor(zoneId, Set.of(tenant)); zoneBuckets.add(newBucket); curatorDb.writeArchiveBuckets(zoneId, zoneBuckets); return newBucket.bucketArn(); } } @NotNull private Optional<ArchiveBucket> find(TenantName tenant, Set<ArchiveBucket> zoneBuckets) { return zoneBuckets.stream().filter(bucket -> bucket.tenants().contains(tenant)).findAny(); } @Override public Set<ArchiveBucket> buckets(ZoneId zoneId) { return curatorDb.readArchiveBuckets(zoneId); } }
class CuratorArchiveBucketDb implements ArchiveBucketDb { /** * Due to policy limits, we can't put data for more than this many tenants in a bucket. * Policy size limit is 20kb, with approx. 500 bytes of policy required per tenant = 40 tenants. * We set the maximum a bit lower to have a solid margin of error. */ private final static int TENANTS_PER_BUCKET = 30; private final ArchiveService archiveService; private final CuratorDb curatorDb; private final StringFlag bucketNameFlag; @Inject public CuratorArchiveBucketDb(Controller controller) { this.archiveService = controller.serviceRegistry().archiveService(); this.curatorDb = controller.curator(); this.bucketNameFlag = Flags.SYNC_HOST_LOGS_TO_S3_BUCKET.bindTo(controller.flagSource()); } @Override public Optional<URI> archiveUriFor(ZoneId zoneId, TenantName tenant) { String bucketName = bucketNameFlag .with(FetchVector.Dimension.ZONE_ID, zoneId.value()) .with(FetchVector.Dimension.TENANT_ID, tenant.value()) .value(); if (bucketName.isBlank()) return Optional.empty(); if ("auto".equals(bucketName)) bucketName = findOrAssignBucket(zoneId, tenant); return Optional.of(URI.create(String.format("s3: } private String assignToBucket(ZoneId zoneId, TenantName tenant) { try (var lock = curatorDb.lockArchiveBuckets(zoneId)) { Set<ArchiveBucket> zoneBuckets = new HashSet<>(curatorDb.readArchiveBuckets(zoneId)); return find(tenant, zoneBuckets) .orElseGet(() -> { Optional<ArchiveBucket> unfilledBucket = zoneBuckets.stream() .filter(bucket -> bucket.tenants().size() < TENANTS_PER_BUCKET) .findAny(); if (unfilledBucket.isPresent()) { var unfilled = unfilledBucket.get(); zoneBuckets.remove(unfilled); zoneBuckets.add(unfilled.withTenant(tenant)); curatorDb.writeArchiveBuckets(zoneId, zoneBuckets); return unfilled.bucketName(); } var newBucket = archiveService.createArchiveBucketFor(zoneId).withTenant(tenant); zoneBuckets.add(newBucket); curatorDb.writeArchiveBuckets(zoneId, zoneBuckets); return newBucket.bucketName(); }); } } @NotNull private Optional<String> find(TenantName tenant, Set<ArchiveBucket> zoneBuckets) { return zoneBuckets.stream() .filter(bucket -> bucket.tenants().contains(tenant)) .findAny() .map(ArchiveBucket::bucketName); } @Override public Set<ArchiveBucket> buckets(ZoneId zoneId) { return curatorDb.readArchiveBuckets(zoneId); } }
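The pattern the reviewer is pointing at, in isolation: `orElseGet` takes a supplier, so the expensive fallback only runs on a cache miss, whereas `orElse` would evaluate its argument eagerly even on a hit. Names below are illustrative, not the controller's real API.

```java
import java.util.Map;
import java.util.Optional;

public class OrElseGetDemo {
    static final Map<String, String> CACHE = Map.of("tenant-a", "arn:bucket-1");

    static Optional<String> find(String tenant) {
        return Optional.ofNullable(CACHE.get(tenant));
    }

    static String assignToBucket(String tenant) {
        System.out.println("expensive assignment for " + tenant);
        return "arn:bucket-new";
    }

    public static void main(String[] args) {
        // The fallback supplier is only invoked when find() is empty.
        System.out.println(find("tenant-a").orElseGet(() -> assignToBucket("tenant-a")));
        System.out.println(find("tenant-b").orElseGet(() -> assignToBucket("tenant-b")));
    }
}
```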
These two asserts only check the counts. We need to assert the content as well, to see whether the nodes were updated correctly.
public void testSeparatedListNodeAllNodeModification() { SyntaxTree syntaxTree = parseFile("separated_node_list_modify.bal"); ModulePartNode oldRoot = syntaxTree.rootNode(); ModuleVariableDeclarationNode oldModuleVariableDeclarationNode = (ModuleVariableDeclarationNode) oldRoot.members().get(0); ListConstructorExpressionNode oldSeperatedlistNode = (ListConstructorExpressionNode) oldModuleVariableDeclarationNode.initializer(); TokenModifier tokenModifier = new TokenModifier(); ModulePartNode newRoot = (ModulePartNode) oldRoot.apply(tokenModifier); ModuleVariableDeclarationNode newModuleVariableDeclarationNode = (ModuleVariableDeclarationNode) newRoot.members().get(0); ListConstructorExpressionNode newSeperatedlistNode = (ListConstructorExpressionNode) newModuleVariableDeclarationNode.initializer(); Assert.assertEquals(oldSeperatedlistNode.expressions().separatorSize(), newSeperatedlistNode.expressions().separatorSize()); Assert.assertEquals(oldSeperatedlistNode.expressions().size(), newSeperatedlistNode.expressions().size()); }
newSeperatedlistNode.expressions().size());
public void testSeparatedListNodeAllNodeModification() { SyntaxTree syntaxTree = parseFile("separated_node_list_modify_all_nodes.bal"); ModulePartNode oldRoot = syntaxTree.rootNode(); WhiteSpaceMinutiaeRemover whiteSpaceMinutiaeRemover = new WhiteSpaceMinutiaeRemover(); ModulePartNode newRoot = (ModulePartNode) oldRoot.apply(whiteSpaceMinutiaeRemover); String expectedStr = getFileContentAsString("separated_node_list_modify_all_nodes_assert.bal"); String actualStr = newRoot.toString(); Assert.assertEquals(actualStr, expectedStr); }
class SyntaxTreeModifierTest extends AbstractSyntaxTreeAPITest { @Test public void testVarDeclStmtModification() { SyntaxTree syntaxTree = parseFile("variable_decl_stmt_modify.bal"); ModulePartNode oldRoot = syntaxTree.rootNode(); VariableDeclModifier variableDeclModifier = new VariableDeclModifier(); ModulePartNode newRoot = (ModulePartNode) oldRoot.apply(variableDeclModifier); FunctionDefinitionNode oldFuncNode = (FunctionDefinitionNode) oldRoot.members().get(0); FunctionBodyBlockNode oldFuncBody = (FunctionBodyBlockNode) oldFuncNode.functionBody(); VariableDeclarationNode oldStmt = (VariableDeclarationNode) oldFuncBody.statements().get(0); Token oldVarName = ((CaptureBindingPatternNode) oldStmt.typedBindingPattern().bindingPattern()).variableName(); FunctionDefinitionNode newFuncNode = (FunctionDefinitionNode) newRoot.members().get(0); FunctionBodyBlockNode newFuncBody = (FunctionBodyBlockNode) newFuncNode.functionBody(); VariableDeclarationNode newStmt = (VariableDeclarationNode) newFuncBody.statements().get(0); Token newVarName = ((CaptureBindingPatternNode) newStmt.typedBindingPattern().bindingPattern()).variableName(); Assert.assertNotEquals(newFuncNode, oldFuncNode); Assert.assertNotEquals(newStmt, oldStmt); Assert.assertEquals(newVarName.text(), oldVarName.text() + "new"); Assert.assertEquals(newStmt.textRangeWithMinutiae().length(), oldStmt.textRangeWithMinutiae().length() + 2); } @Test public void testRenameIdentifierWithoutTrivia() { SyntaxTree syntaxTree = parseFile("variable_decl_stmt_modify.bal"); ModulePartNode oldRoot = syntaxTree.rootNode(); IdentifierModifier identifierModifier = new IdentifierModifier(); ModulePartNode newRoot = (ModulePartNode) oldRoot.apply(identifierModifier); FunctionDefinitionNode oldFuncNode = (FunctionDefinitionNode) oldRoot.members().get(0); String oldFuncName = oldFuncNode.functionName().text(); FunctionDefinitionNode newFuncNode = (FunctionDefinitionNode) newRoot.members().get(0); String newFuncName = newFuncNode.functionName().text(); Assert.assertEquals(newFuncName, oldFuncName + "_new"); } @Test public void testBinaryExprModification() { SyntaxTree syntaxTree = parseFile("binary_expression_modify.bal"); ModulePartNode oldRoot = syntaxTree.rootNode(); BinaryExpressionModifier binaryExprModifier = new BinaryExpressionModifier(); ModulePartNode newRoot = binaryExprModifier.transform(oldRoot); Predicate<SyntaxKind> plusOrAsteriskTokenPredicate = syntaxKind -> SyntaxKind.PLUS_TOKEN == syntaxKind || SyntaxKind.ASTERISK_TOKEN == syntaxKind; Predicate<SyntaxKind> minusOrSlashTokenPredicate = syntaxKind -> SyntaxKind.MINUS_TOKEN == syntaxKind || SyntaxKind.SLASH_TOKEN == syntaxKind; TokenCounter plusOrAsteriskCounter = new TokenCounter(plusOrAsteriskTokenPredicate); TokenCounter minusOrSlashCounter = new TokenCounter(minusOrSlashTokenPredicate); Assert.assertEquals(plusOrAsteriskCounter.transform(newRoot), new Integer(0)); Assert.assertEquals(minusOrSlashCounter.transform(newRoot), new Integer(4)); } @Test public void testSeparatedListNodeNonSeperatorModification() { SyntaxTree syntaxTree = parseFile("separated_node_list_modify_all_nodes.bal"); ModulePartNode oldRoot = syntaxTree.rootNode(); ModuleVariableDeclarationNode oldModuleVariableDeclarationNode = (ModuleVariableDeclarationNode) oldRoot.members().get(0); ListConstructorExpressionNode oldSeperatedlistNode = (ListConstructorExpressionNode) oldModuleVariableDeclarationNode.initializer(); IdentifierModifier identifierModifier = new IdentifierModifier(); ModulePartNode newRoot = (ModulePartNode) 
oldRoot.apply(identifierModifier); ModuleVariableDeclarationNode newModuleVariableDeclarationNode = (ModuleVariableDeclarationNode) newRoot.members().get(0); ListConstructorExpressionNode newSeperatedlistNode = (ListConstructorExpressionNode) newModuleVariableDeclarationNode.initializer(); Assert.assertEquals(oldSeperatedlistNode.expressions().separatorSize(), newSeperatedlistNode.expressions().separatorSize()); Assert.assertEquals(oldSeperatedlistNode.expressions().size(), newSeperatedlistNode.expressions().size()); } @Test /** * An implementation of {@code TreeModifier} that modify all variable declaration statements. */ private static class VariableDeclModifier extends TreeModifier { @Override public VariableDeclarationNode transform(VariableDeclarationNode varDeclStmt) { TypedBindingPatternNode typedBindingPattern = varDeclStmt.typedBindingPattern(); Token varNameToken = ((CaptureBindingPatternNode) typedBindingPattern.bindingPattern()).variableName(); IdentifierToken newVarName = NodeFactory.createIdentifierToken(varNameToken.text() + "new"); CaptureBindingPatternNode newCaptureBP = NodeFactory.createCaptureBindingPatternNode(newVarName); TypedBindingPatternNode newTypedBP = NodeFactory.createTypedBindingPatternNode(typedBindingPattern.typeDescriptor(), newCaptureBP); return NodeFactory.createVariableDeclarationNode(varDeclStmt.annotations(), varDeclStmt.finalKeyword().orElse(null), newTypedBP, varDeclStmt.equalsToken().orElse(null), varDeclStmt.initializer().orElse(null), varDeclStmt.semicolonToken()); } } /** * An implementation of {@code TreeModifier} that rename all identifiers in the tree. */ private static class IdentifierModifier extends TreeModifier { @Override public IdentifierToken transform(IdentifierToken identifier) { return identifier.modify(identifier.text() + "_new"); } } /** * An implementation of {@code TreeModifier} that removes all white space minutiae from all tokens. */ private static class TokenModifier extends TreeModifier { @Override public Token transform(Token token) { Predicate<Minutiae> minutiaePredicate = minutiae -> minutiae.kind() == SyntaxKind.WHITESPACE_MINUTIAE; MinutiaeList oldLeadingMinutiae = token.leadingMinutiae(); MinutiaeList oldTrailingMinutiae = token.trailingMinutiae(); Collection<Minutiae> matchingLeadingMinutiae = getMatchingMinutiae(oldLeadingMinutiae, minutiaePredicate); Collection<Minutiae> matchingTrailingMinutiae = getMatchingMinutiae(oldTrailingMinutiae, minutiaePredicate); MinutiaeList newLeadingMinutiae = oldLeadingMinutiae.removeAll(matchingLeadingMinutiae); MinutiaeList newTrailingMinutiae = oldTrailingMinutiae.removeAll(matchingTrailingMinutiae); return token.modify(newLeadingMinutiae, newTrailingMinutiae); } } private static Collection<Minutiae> getMatchingMinutiae(MinutiaeList leadingMinutiae, Predicate<Minutiae> predicate) { Collection<Minutiae> c = new ArrayList<>(); for (int i = 0; i < leadingMinutiae.size(); i++) { Minutiae minutiae = leadingMinutiae.get(i); if (predicate.test(minutiae)) { c.add(minutiae); } } return c; } /** * An implementation of {@code TreeModifier} that perform random changes. * Transform + to -. * Transform * to /. 
*/ private static class BinaryExpressionModifier extends TreeModifier { @Override public BinaryExpressionNode transform(BinaryExpressionNode binaryExprNode) { Node newLHSExpr = modifyNode(binaryExprNode.lhsExpr()); Node newRHSExpr = modifyNode(binaryExprNode.rhsExpr()); Token newOperator; Token oldOperator = binaryExprNode.operator(); switch (oldOperator.kind()) { case PLUS_TOKEN: newOperator = NodeFactory.createToken(SyntaxKind.MINUS_TOKEN, oldOperator.leadingMinutiae(), oldOperator.trailingMinutiae()); break; case ASTERISK_TOKEN: newOperator = NodeFactory.createToken(SyntaxKind.SLASH_TOKEN, oldOperator.leadingMinutiae(), oldOperator.trailingMinutiae()); break; default: newOperator = oldOperator; } return binaryExprNode.modify().withOperator(newOperator).withLhsExpr(newLHSExpr).withRhsExpr(newRHSExpr) .apply(); } } /** * An implementation of {@code NodeTransformer} that counts the number of token that matches a given predicate. */ private static class TokenCounter extends NodeTransformer<Integer> { private final Predicate<SyntaxKind> predicate; public TokenCounter(Predicate<SyntaxKind> predicate) { this.predicate = predicate; } @Override public Integer transform(Token token) { SyntaxKind syntaxKind = token.kind(); return predicate.test(syntaxKind) ? 1 : 0; } @Override protected Integer transformSyntaxNode(Node node) { if (node instanceof Token) { return node.apply(this); } int tokenCount = 0; NonTerminalNode nonTerminalNode = (NonTerminalNode) node; for (Node child : nonTerminalNode.children()) { tokenCount += child.apply(this); } return tokenCount; } } }
class SyntaxTreeModifierTest extends AbstractSyntaxTreeAPITest { @Test public void testVarDeclStmtModification() { SyntaxTree syntaxTree = parseFile("variable_decl_stmt_modify.bal"); ModulePartNode oldRoot = syntaxTree.rootNode(); VariableDeclModifier variableDeclModifier = new VariableDeclModifier(); ModulePartNode newRoot = (ModulePartNode) oldRoot.apply(variableDeclModifier); FunctionDefinitionNode oldFuncNode = (FunctionDefinitionNode) oldRoot.members().get(0); FunctionBodyBlockNode oldFuncBody = (FunctionBodyBlockNode) oldFuncNode.functionBody(); VariableDeclarationNode oldStmt = (VariableDeclarationNode) oldFuncBody.statements().get(0); Token oldVarName = ((CaptureBindingPatternNode) oldStmt.typedBindingPattern().bindingPattern()).variableName(); FunctionDefinitionNode newFuncNode = (FunctionDefinitionNode) newRoot.members().get(0); FunctionBodyBlockNode newFuncBody = (FunctionBodyBlockNode) newFuncNode.functionBody(); VariableDeclarationNode newStmt = (VariableDeclarationNode) newFuncBody.statements().get(0); Token newVarName = ((CaptureBindingPatternNode) newStmt.typedBindingPattern().bindingPattern()).variableName(); Assert.assertNotEquals(newFuncNode, oldFuncNode); Assert.assertNotEquals(newStmt, oldStmt); Assert.assertEquals(newVarName.text(), oldVarName.text() + "new"); Assert.assertEquals(newStmt.textRangeWithMinutiae().length(), oldStmt.textRangeWithMinutiae().length() + 2); } @Test public void testRenameIdentifierWithoutTrivia() { SyntaxTree syntaxTree = parseFile("variable_decl_stmt_modify.bal"); ModulePartNode oldRoot = syntaxTree.rootNode(); IdentifierModifier identifierModifier = new IdentifierModifier(); ModulePartNode newRoot = (ModulePartNode) oldRoot.apply(identifierModifier); FunctionDefinitionNode oldFuncNode = (FunctionDefinitionNode) oldRoot.members().get(0); String oldFuncName = oldFuncNode.functionName().text(); FunctionDefinitionNode newFuncNode = (FunctionDefinitionNode) newRoot.members().get(0); String newFuncName = newFuncNode.functionName().text(); Assert.assertEquals(newFuncName, oldFuncName + "_new"); } @Test public void testBinaryExprModification() { SyntaxTree syntaxTree = parseFile("binary_expression_modify.bal"); ModulePartNode oldRoot = syntaxTree.rootNode(); BinaryExpressionModifier binaryExprModifier = new BinaryExpressionModifier(); ModulePartNode newRoot = binaryExprModifier.transform(oldRoot); Predicate<SyntaxKind> plusOrAsteriskTokenPredicate = syntaxKind -> SyntaxKind.PLUS_TOKEN == syntaxKind || SyntaxKind.ASTERISK_TOKEN == syntaxKind; Predicate<SyntaxKind> minusOrSlashTokenPredicate = syntaxKind -> SyntaxKind.MINUS_TOKEN == syntaxKind || SyntaxKind.SLASH_TOKEN == syntaxKind; TokenCounter plusOrAsteriskCounter = new TokenCounter(plusOrAsteriskTokenPredicate); TokenCounter minusOrSlashCounter = new TokenCounter(minusOrSlashTokenPredicate); Assert.assertEquals(plusOrAsteriskCounter.transform(newRoot), new Integer(0)); Assert.assertEquals(minusOrSlashCounter.transform(newRoot), new Integer(4)); } @Test public void testSeparatedListNodeNonSeperatorModification() { SyntaxTree syntaxTree = parseFile("separated_node_list_modify.bal"); ModulePartNode oldRoot = syntaxTree.rootNode(); IdentifierModifier identifierModifier = new IdentifierModifier(); ModulePartNode newRoot = (ModulePartNode) oldRoot.apply(identifierModifier); String expectedStr = getFileContentAsString("separated_node_list_modify_assert.bal"); String actualStr = newRoot.toString(); Assert.assertEquals(actualStr, expectedStr); } @Test /** * An implementation of {@code TreeModifier} that 
modify all variable declaration statements. */ private static class VariableDeclModifier extends TreeModifier { @Override public VariableDeclarationNode transform(VariableDeclarationNode varDeclStmt) { TypedBindingPatternNode typedBindingPattern = varDeclStmt.typedBindingPattern(); Token varNameToken = ((CaptureBindingPatternNode) typedBindingPattern.bindingPattern()).variableName(); IdentifierToken newVarName = NodeFactory.createIdentifierToken(varNameToken.text() + "new"); CaptureBindingPatternNode newCaptureBP = NodeFactory.createCaptureBindingPatternNode(newVarName); TypedBindingPatternNode newTypedBP = NodeFactory.createTypedBindingPatternNode(typedBindingPattern.typeDescriptor(), newCaptureBP); return NodeFactory.createVariableDeclarationNode(varDeclStmt.annotations(), varDeclStmt.finalKeyword().orElse(null), newTypedBP, varDeclStmt.equalsToken().orElse(null), varDeclStmt.initializer().orElse(null), varDeclStmt.semicolonToken()); } } /** * An implementation of {@code TreeModifier} that rename all identifiers in the tree. */ private static class IdentifierModifier extends TreeModifier { @Override public IdentifierToken transform(IdentifierToken identifier) { return identifier.modify(identifier.text() + "_new"); } } /** * An implementation of {@code TreeModifier} that removes all white space minutiae from all tokens. */ private static class WhiteSpaceMinutiaeRemover extends TreeModifier { @Override public Token transform(Token token) { Predicate<Minutiae> minutiaePredicate = minutiae -> minutiae.kind() == SyntaxKind.WHITESPACE_MINUTIAE; MinutiaeList oldLeadingMinutiae = token.leadingMinutiae(); MinutiaeList oldTrailingMinutiae = token.trailingMinutiae(); Collection<Minutiae> matchingLeadingMinutiae = getMatchingMinutiae(oldLeadingMinutiae, minutiaePredicate); Collection<Minutiae> matchingTrailingMinutiae = getMatchingMinutiae(oldTrailingMinutiae, minutiaePredicate); MinutiaeList newLeadingMinutiae = oldLeadingMinutiae.removeAll(matchingLeadingMinutiae); MinutiaeList newTrailingMinutiae = oldTrailingMinutiae.removeAll(matchingTrailingMinutiae); return token.modify(newLeadingMinutiae, newTrailingMinutiae); } @Override public IdentifierToken transform(IdentifierToken identifierToken) { return (IdentifierToken) this.transform((Token) identifierToken); } } private static Collection<Minutiae> getMatchingMinutiae(MinutiaeList leadingMinutiae, Predicate<Minutiae> predicate) { Collection<Minutiae> c = new ArrayList<>(); for (int i = 0; i < leadingMinutiae.size(); i++) { Minutiae minutiae = leadingMinutiae.get(i); if (predicate.test(minutiae)) { c.add(minutiae); } } return c; } /** * An implementation of {@code TreeModifier} that perform random changes. * Transform + to -. * Transform * to /. 
*/ private static class BinaryExpressionModifier extends TreeModifier { @Override public BinaryExpressionNode transform(BinaryExpressionNode binaryExprNode) { Node newLHSExpr = modifyNode(binaryExprNode.lhsExpr()); Node newRHSExpr = modifyNode(binaryExprNode.rhsExpr()); Token newOperator; Token oldOperator = binaryExprNode.operator(); switch (oldOperator.kind()) { case PLUS_TOKEN: newOperator = NodeFactory.createToken(SyntaxKind.MINUS_TOKEN, oldOperator.leadingMinutiae(), oldOperator.trailingMinutiae()); break; case ASTERISK_TOKEN: newOperator = NodeFactory.createToken(SyntaxKind.SLASH_TOKEN, oldOperator.leadingMinutiae(), oldOperator.trailingMinutiae()); break; default: newOperator = oldOperator; } return binaryExprNode.modify().withOperator(newOperator).withLhsExpr(newLHSExpr).withRhsExpr(newRHSExpr) .apply(); } } /** * An implementation of {@code NodeTransformer} that counts the number of token that matches a given predicate. */ private static class TokenCounter extends NodeTransformer<Integer> { private final Predicate<SyntaxKind> predicate; public TokenCounter(Predicate<SyntaxKind> predicate) { this.predicate = predicate; } @Override public Integer transform(Token token) { SyntaxKind syntaxKind = token.kind(); return predicate.test(syntaxKind) ? 1 : 0; } @Override protected Integer transformSyntaxNode(Node node) { if (node instanceof Token) { return node.apply(this); } int tokenCount = 0; NonTerminalNode nonTerminalNode = (NonTerminalNode) node; for (Node child : nonTerminalNode.children()) { tokenCount += child.apply(this); } return tokenCount; } } }
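A sketch of the content-level check the reviewer asks for, mirroring the revised test above: render the whole modified tree and diff it against an expected source fixture, so every updated node is covered rather than just the counts (`parseFile` and `getFileContentAsString` are the suite's own helpers).

```java
@Test
public void testSeparatedListContentAfterModification() {
    SyntaxTree syntaxTree = parseFile("separated_node_list_modify_all_nodes.bal");
    ModulePartNode oldRoot = syntaxTree.rootNode();
    ModulePartNode newRoot = (ModulePartNode) oldRoot.apply(new WhiteSpaceMinutiaeRemover());

    // Content assertion: the full rendered text must match the expected fixture.
    String expectedStr = getFileContentAsString("separated_node_list_modify_all_nodes_assert.bal");
    Assert.assertEquals(newRoot.toString(), expectedStr);
}
```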
No, NA would pick up the context set during the last HA tick.
public NodeAgentContext nextContext() throws InterruptedException { synchronized (monitor) { nextContext = null; Duration untilNextContext = Duration.ZERO; while (setAndGetIsFrozen(wantFrozen) || nextContext == null || (untilNextContext = Duration.between(Instant.now(), nextContextAt)).toMillis() > 0) { if (pendingInterrupt) { pendingInterrupt = false; throw new InterruptedException("interrupt() was called before next context was scheduled"); } try { monitor.wait(Math.max(untilNextContext.toMillis(), 0L)); } catch (InterruptedException ignored) { } } currentContext = nextContext; return currentContext; } }
currentContext = nextContext;
public NodeAgentContext nextContext() throws InterruptedException { synchronized (monitor) { nextContext = null; Duration untilNextContext = Duration.ZERO; while (setAndGetIsFrozen(wantFrozen) || nextContext == null || (untilNextContext = Duration.between(Instant.now(), nextContextAt)).toMillis() > 0) { if (pendingInterrupt) { pendingInterrupt = false; throw new InterruptedException("interrupt() was called before next context was scheduled"); } try { monitor.wait(Math.max(untilNextContext.toMillis(), 0L)); } catch (InterruptedException ignored) { } } currentContext = nextContext; return currentContext; } }
class NodeAgentContextManager implements NodeAgentContextSupplier, NodeAgentScheduler { private final Object monitor = new Object(); private final Clock clock; private NodeAgentContext currentContext; private NodeAgentContext nextContext; private Instant nextContextAt; private boolean wantFrozen = false; private boolean isFrozen = true; private boolean pendingInterrupt = false; public NodeAgentContextManager(Clock clock, NodeAgentContext context) { this.clock = clock; this.currentContext = context; } @Override public void scheduleTickWith(NodeAgentContext context, Instant at) { synchronized (monitor) { nextContext = Objects.requireNonNull(context); nextContextAt = Objects.requireNonNull(at); monitor.notifyAll(); } } @Override public boolean setFrozen(boolean frozen, Duration timeout) { synchronized (monitor) { if (wantFrozen != frozen) { wantFrozen = frozen; monitor.notifyAll(); } boolean successful; long remainder; long end = clock.instant().plus(timeout).toEpochMilli(); while (!(successful = isFrozen == frozen) && (remainder = end - clock.millis()) > 0) { try { monitor.wait(remainder); } catch (InterruptedException ignored) { } } return successful; } } @Override @Override public NodeAgentContext currentContext() { synchronized (monitor) { return currentContext; } } @Override public void interrupt() { synchronized (monitor) { pendingInterrupt = true; monitor.notifyAll(); } } private boolean setAndGetIsFrozen(boolean isFrozen) { synchronized (monitor) { if (this.isFrozen != isFrozen) { this.isFrozen = isFrozen; monitor.notifyAll(); } return this.isFrozen; } } }
class NodeAgentContextManager implements NodeAgentContextSupplier, NodeAgentScheduler { private final Object monitor = new Object(); private final Clock clock; private NodeAgentContext currentContext; private NodeAgentContext nextContext; private Instant nextContextAt; private boolean wantFrozen = false; private boolean isFrozen = true; private boolean pendingInterrupt = false; public NodeAgentContextManager(Clock clock, NodeAgentContext context) { this.clock = clock; this.currentContext = context; } @Override public void scheduleTickWith(NodeAgentContext context, Instant at) { synchronized (monitor) { nextContext = Objects.requireNonNull(context); nextContextAt = Objects.requireNonNull(at); monitor.notifyAll(); } } @Override public boolean setFrozen(boolean frozen, Duration timeout) { synchronized (monitor) { if (wantFrozen != frozen) { wantFrozen = frozen; monitor.notifyAll(); } boolean successful; long remainder; long end = clock.instant().plus(timeout).toEpochMilli(); while (!(successful = isFrozen == frozen) && (remainder = end - clock.millis()) > 0) { try { monitor.wait(remainder); } catch (InterruptedException ignored) { } } return successful; } } @Override @Override public NodeAgentContext currentContext() { synchronized (monitor) { return currentContext; } } @Override public void interrupt() { synchronized (monitor) { pendingInterrupt = true; monitor.notifyAll(); } } private boolean setAndGetIsFrozen(boolean isFrozen) { synchronized (monitor) { if (this.isFrozen != isFrozen) { this.isFrozen = isFrozen; monitor.notifyAll(); } return this.isFrozen; } } }
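A minimal sketch of the guarded-wait handoff this manager relies on: the consumer clears the slot first, then blocks until a producer publishes a fresh value, which is exactly why the agent cannot pick up the context set during the last tick. Class and field names are illustrative.

```java
public class GuardedHandoff<T> {
    private final Object monitor = new Object();
    private T next;

    // Producer side: publish a value and wake any waiting consumer.
    public void publish(T value) {
        synchronized (monitor) {
            next = value;
            monitor.notifyAll();
        }
    }

    // Consumer side: discard any stale value, then wait for a fresh one.
    public T take() throws InterruptedException {
        synchronized (monitor) {
            next = null; // ensure we don't pick up the value from the last tick
            while (next == null) { // loop guards against spurious wakeups
                monitor.wait();
            }
            return next;
        }
    }
}
```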
Each Iceberg split contains data files, delete files (for upsert), and a schema string. Each data file also carries stats for every column, so if the table is wide (many columns), each split may exceed 10 KB.
public byte[] serialize(HybridSourceEnumeratorState enumState) throws IOException { try (ByteArrayOutputStream baos = new ByteArrayOutputStream(); DataOutputStream out = new DataOutputStream(baos)) { out.writeInt(enumState.getCurrentSourceIndex()); out.writeInt(enumState.wrappedStateSerializerVersion()); out.writeInt(enumState.getWrappedState().length); out.write(enumState.getWrappedState()); out.flush(); return baos.toByteArray(); } }
out.writeInt(enumState.getWrappedState().length);
public byte[] serialize(HybridSourceEnumeratorState enumState) throws IOException { try (ByteArrayOutputStream baos = new ByteArrayOutputStream(); DataOutputStream out = new DataOutputStream(baos)) { out.writeInt(enumState.getCurrentSourceIndex()); out.writeInt(enumState.getWrappedStateSerializerVersion()); out.writeInt(enumState.getWrappedState().length); out.write(enumState.getWrappedState()); out.flush(); return baos.toByteArray(); } }
class HybridSourceEnumeratorStateSerializer implements SimpleVersionedSerializer<HybridSourceEnumeratorState> { private static final int CURRENT_VERSION = 0; public HybridSourceEnumeratorStateSerializer() {} @Override public int getVersion() { return CURRENT_VERSION; } @Override @Override public HybridSourceEnumeratorState deserialize(int version, byte[] serialized) throws IOException { if (version == 0) { return deserializeV0(serialized); } throw new IOException( String.format( "The bytes are serialized with version %d, " + "while this deserializer only supports version up to %d", version, CURRENT_VERSION)); } private HybridSourceEnumeratorState deserializeV0(byte[] serialized) throws IOException { try (ByteArrayInputStream bais = new ByteArrayInputStream(serialized); DataInputStream in = new DataInputStream(bais)) { int sourceIndex = in.readInt(); int nestedVersion = in.readInt(); int length = in.readInt(); byte[] nestedBytes = new byte[length]; in.readFully(nestedBytes); return new HybridSourceEnumeratorState(sourceIndex, nestedBytes, nestedVersion); } } }
class HybridSourceEnumeratorStateSerializer implements SimpleVersionedSerializer<HybridSourceEnumeratorState> { private static final int CURRENT_VERSION = 0; public HybridSourceEnumeratorStateSerializer() {} @Override public int getVersion() { return CURRENT_VERSION; } @Override @Override public HybridSourceEnumeratorState deserialize(int version, byte[] serialized) throws IOException { if (version == 0) { return deserializeV0(serialized); } throw new IOException( String.format( "The bytes are serialized with version %d, " + "while this deserializer only supports version up to %d", version, CURRENT_VERSION)); } private HybridSourceEnumeratorState deserializeV0(byte[] serialized) throws IOException { try (ByteArrayInputStream bais = new ByteArrayInputStream(serialized); DataInputStream in = new DataInputStream(bais)) { int sourceIndex = in.readInt(); int nestedVersion = in.readInt(); int length = in.readInt(); byte[] nestedBytes = new byte[length]; in.readFully(nestedBytes); return new HybridSourceEnumeratorState(sourceIndex, nestedBytes, nestedVersion); } } }
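A round-trip sketch showing why the int length prefix matters here: wrapped enumerator state can easily exceed 10 KB (wide Iceberg schemas, per-column stats), and `writeInt`/`readFully` handles payloads up to `Integer.MAX_VALUE` bytes. Plain `java.io` only; no Flink types are used.

```java
import java.io.*;

public class LengthPrefixedRoundTrip {
    static byte[] serialize(int sourceIndex, int nestedVersion, byte[] nested) throws IOException {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(baos)) {
            out.writeInt(sourceIndex);
            out.writeInt(nestedVersion);
            out.writeInt(nested.length); // 4-byte prefix: fine for multi-KB/MB payloads
            out.write(nested);
        }
        return baos.toByteArray();
    }

    public static void main(String[] args) throws IOException {
        byte[] wide = new byte[64 * 1024]; // e.g. state for a very wide table
        byte[] bytes = serialize(3, 1, wide);
        try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes))) {
            int idx = in.readInt(), ver = in.readInt(), len = in.readInt();
            byte[] nested = new byte[len];
            in.readFully(nested);
            System.out.println(idx + " " + ver + " " + nested.length); // 3 1 65536
        }
    }
}
```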
There are still some `assertEquals` calls left here.
public void histogram() { assertEquals(metricsTrackerFacade.getMetricsTrackerManager().getClass(), MetricsTrackerManagerFixture2.class); ((MetricsTrackerManagerFixture2) metricsTrackerFacade.getMetricsTrackerManager()).setMetricsTrackerFactory(new MetricsTrackerFactoryFixture2()); HistogramMetricsTrackerDelegate delegate = metricsTrackerFacade.histogramStartTimer("request"); metricsTrackerFacade.histogramObserveDuration(delegate); assertThat(delegate.getClass().getName(), is(NoneHistogramMetricsTrackerDelegate.class.getName())); }
assertEquals(metricsTrackerFacade.getMetricsTrackerManager().getClass(), MetricsTrackerManagerFixture2.class);
public void histogram() { assertThat(metricsTrackerFacade.getMetricsTrackerManager().getClass().getName(), is(MetricsTrackerManagerFixture2.class.getName())); ((MetricsTrackerManagerFixture2) metricsTrackerFacade.getMetricsTrackerManager()).setMetricsTrackerFactory(new MetricsTrackerFactoryFixture2()); HistogramMetricsTrackerDelegate delegate = metricsTrackerFacade.histogramStartTimer("request"); metricsTrackerFacade.histogramObserveDuration(delegate); assertThat(delegate.getClass().getName(), is(NoneHistogramMetricsTrackerDelegate.class.getName())); }
class MetricsTrackerFacadeTest { private MetricsTrackerFacade metricsTrackerFacade = MetricsTrackerFacade.getInstance(); @Before public void setUp() { MetricsConfiguration metricsConfiguration = new MetricsConfiguration("fixture", null, null, null); metricsTrackerFacade.init(metricsConfiguration); assertThat(metricsTrackerFacade.getMetricsTrackerManager().getClass().getName(), is(MetricsTrackerManagerFixture2.class.getName())); assertTrue(metricsTrackerFacade.getEnabled()); } @Test public void assertFindMetricsTrackerManager() { assertNull(metricsTrackerFacade.findMetricsTrackerManager("fixture1")); assertNotNull(metricsTrackerFacade.findMetricsTrackerManager("fixture")); } @Test public void counterInc() { metricsTrackerFacade.counterInc("request_total"); } @Test public void gaugeInc() { metricsTrackerFacade.gaugeInc("request_total"); } @Test public void gaugeDec() { metricsTrackerFacade.gaugeDec("request_total"); } @Test @Test public void summary() { assertEquals(metricsTrackerFacade.getMetricsTrackerManager().getClass(), MetricsTrackerManagerFixture2.class); ((MetricsTrackerManagerFixture2) metricsTrackerFacade.getMetricsTrackerManager()).setMetricsTrackerFactory(new MetricsTrackerFactoryFixture2()); SummaryMetricsTrackerDelegate delegate = metricsTrackerFacade.summaryStartTimer("request"); metricsTrackerFacade.summaryObserveDuration(delegate); assertThat(delegate.getClass().getName(), is(NoneSummaryMetricsTrackerDelegate.class.getName())); } @Test public void testNoneDelegate() { ((MetricsTrackerManagerFixture2) metricsTrackerFacade.getMetricsTrackerManager()).setMetricsTrackerFactory(new MetricsTrackerFactoryFixture()); SummaryMetricsTrackerDelegate summaryMetricsTrackerDelegate = metricsTrackerFacade.summaryStartTimer("request"); assertThat(summaryMetricsTrackerDelegate.getClass().getName(), is(NoneSummaryMetricsTrackerDelegate.class.getName())); HistogramMetricsTrackerDelegate histogramMetricsTrackerDelegate = metricsTrackerFacade.histogramStartTimer("request"); assertThat(histogramMetricsTrackerDelegate.getClass().getName(), is(NoneHistogramMetricsTrackerDelegate.class.getName())); } }
class MetricsTrackerFacadeTest { private MetricsTrackerFacade metricsTrackerFacade = MetricsTrackerFacade.getInstance(); @Before public void setUp() { MetricsConfiguration metricsConfiguration = new MetricsConfiguration("fixture", null, null, null); metricsTrackerFacade.init(metricsConfiguration); assertThat(metricsTrackerFacade.getMetricsTrackerManager().getClass().getName(), is(MetricsTrackerManagerFixture2.class.getName())); assertThat(metricsTrackerFacade.getEnabled(), is(true)); } @Test public void assertFindMetricsTrackerManager() { assertNull(metricsTrackerFacade.findMetricsTrackerManager("fixture1")); assertNotNull(metricsTrackerFacade.findMetricsTrackerManager("fixture")); } @Test public void counterInc() { metricsTrackerFacade.counterInc("request_total"); } @Test public void gaugeInc() { metricsTrackerFacade.gaugeInc("request_total"); } @Test public void gaugeDec() { metricsTrackerFacade.gaugeDec("request_total"); } @Test @Test public void summary() { assertThat(metricsTrackerFacade.getMetricsTrackerManager().getClass().getName(), is(MetricsTrackerManagerFixture2.class.getName())); ((MetricsTrackerManagerFixture2) metricsTrackerFacade.getMetricsTrackerManager()).setMetricsTrackerFactory(new MetricsTrackerFactoryFixture2()); SummaryMetricsTrackerDelegate delegate = metricsTrackerFacade.summaryStartTimer("request"); metricsTrackerFacade.summaryObserveDuration(delegate); assertThat(delegate.getClass().getName(), is(NoneSummaryMetricsTrackerDelegate.class.getName())); } @Test public void testNoneDelegate() { ((MetricsTrackerManagerFixture2) metricsTrackerFacade.getMetricsTrackerManager()).setMetricsTrackerFactory(new MetricsTrackerFactoryFixture()); SummaryMetricsTrackerDelegate summaryMetricsTrackerDelegate = metricsTrackerFacade.summaryStartTimer("request"); assertThat(summaryMetricsTrackerDelegate.getClass().getName(), is(NoneSummaryMetricsTrackerDelegate.class.getName())); HistogramMetricsTrackerDelegate histogramMetricsTrackerDelegate = metricsTrackerFacade.histogramStartTimer("request"); assertThat(histogramMetricsTrackerDelegate.getClass().getName(), is(NoneHistogramMetricsTrackerDelegate.class.getName())); } }
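For reference, the Hamcrest style the revised tests switch to: `assertThat` reads left-to-right ("actual is expected") and yields clearer failure messages than a class-equality `assertEquals`. A sketch assuming JUnit 4 plus Hamcrest on the classpath; the nested fixture class is hypothetical.

```java
import static org.hamcrest.CoreMatchers.instanceOf;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.MatcherAssert.assertThat;

import org.junit.Test;

public class AssertThatStyleTest {
    @Test
    public void preferAssertThat() {
        Object manager = new MetricsTrackerManagerFixture2(); // stand-in instance
        // Equality on the class name, as the revised test does:
        assertThat(manager.getClass().getName(), is(MetricsTrackerManagerFixture2.class.getName()));
        // Or assert the type directly:
        assertThat(manager, is(instanceOf(MetricsTrackerManagerFixture2.class)));
    }

    static class MetricsTrackerManagerFixture2 { } // hypothetical fixture
}
```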
```suggestion if (!isAllowNull && !EngineType.supportNotNullColumn(engineName)) { ```
public void analyze(boolean isOlap, boolean isInternalCatalog, String engineName) throws AnalysisException { if (isInternalCatalog) { if (!isAllowNull) { if (!EngineType.supportNotNullColumn(engineName)) { throw new AnalysisException(String.format("All columns must be nullable for external table. " + "Column %s is not nullable, You can rebuild the external table and " + "We strongly recommend that you use catalog to access external data", name)); } } } analyze(isOlap); }
if (!EngineType.supportNotNullColumn(engineName)) {
public void analyze(boolean isOlap, boolean isInternalCatalog, String engineName) throws AnalysisException { if (isInternalCatalog) { if (!isAllowNull && !EngineType.supportNotNullColumn(engineName)) { throw new AnalysisException(String.format("All columns must be nullable for external table. " + "Column %s is not nullable, You can rebuild the external table and " + "We strongly recommend that you use catalog to access external data", name)); } } analyze(isOlap); }
class DefaultValueDef { public boolean isSet; public Expr expr; public DefaultValueDef(boolean isSet, Expr expr) { this.isSet = isSet; if (expr != null) { this.expr = expr; } else { this.expr = NullLiteral.create(Type.VARCHAR); } } private static final String ZERO = new String(new byte[] {0}); public static DefaultValueDef NOT_SET = new DefaultValueDef(false, NullLiteral.create(Type.VARCHAR)); public static DefaultValueDef NULL_DEFAULT_VALUE = new DefaultValueDef(true, NullLiteral.create(Type.VARCHAR)); public static DefaultValueDef EMPTY_VALUE = new DefaultValueDef(true, new StringLiteral(ZERO)); public static DefaultValueDef CURRENT_TIMESTAMP_VALUE = new DefaultValueDef(true, new FunctionCallExpr("now", new ArrayList<>())); }
class DefaultValueDef { public boolean isSet; public Expr expr; public DefaultValueDef(boolean isSet, Expr expr) { this.isSet = isSet; if (expr != null) { this.expr = expr; } else { this.expr = NullLiteral.create(Type.VARCHAR); } } private static final String ZERO = new String(new byte[] {0}); public static DefaultValueDef NOT_SET = new DefaultValueDef(false, NullLiteral.create(Type.VARCHAR)); public static DefaultValueDef NULL_DEFAULT_VALUE = new DefaultValueDef(true, NullLiteral.create(Type.VARCHAR)); public static DefaultValueDef EMPTY_VALUE = new DefaultValueDef(true, new StringLiteral(ZERO)); public static DefaultValueDef CURRENT_TIMESTAMP_VALUE = new DefaultValueDef(true, new FunctionCallExpr("now", new ArrayList<>())); }
No change has been made to the `objectType.flags` value. If you look at line 1655, we explicitly set the flags value there, so this line didn't have any impact anyway.
public BType readType(int cpI) throws IOException { byte tag = inputStream.readByte(); Name name = names.fromString(getStringCPEntryValue(inputStream)); var flags = inputStream.readLong(); int typeFlags = inputStream.readInt(); switch (tag) { case TypeTags.INT: return typeParamAnalyzer.getNominalType(symTable.intType, name, flags); case TypeTags.BYTE: return typeParamAnalyzer.getNominalType(symTable.byteType, name, flags); case TypeTags.FLOAT: return typeParamAnalyzer.getNominalType(symTable.floatType, name, flags); case TypeTags.DECIMAL: return typeParamAnalyzer.getNominalType(symTable.decimalType, name, flags); case TypeTags.STRING: return typeParamAnalyzer.getNominalType(symTable.stringType, name, flags); case TypeTags.BOOLEAN: return typeParamAnalyzer.getNominalType(symTable.booleanType, name, flags); case TypeTags.JSON: return isImmutable(flags) ? getEffectiveImmutableType(symTable.jsonType) : symTable.jsonType; case TypeTags.XML: BType constraintType = readTypeFromCp(); BXMLType mutableXmlType = new BXMLType(constraintType, symTable.xmlType.tsymbol); if (Symbols.isFlagOn(flags, Flags.PARAMETERIZED)) { mutableXmlType.flags |= Flags.PARAMETERIZED; } return isImmutable(flags) ? getEffectiveImmutableType(mutableXmlType) : mutableXmlType; case TypeTags.NIL: return symTable.nilType; case TypeTags.NEVER: return symTable.neverType; case TypeTags.ANYDATA: if (name.getValue().equals(Names.ANYDATA.getValue())) { name = Names.EMPTY; } BType anydataNominalType = typeParamAnalyzer.getNominalType(symTable.anydataType, name, flags); return isImmutable(flags) ? getEffectiveImmutableType(anydataNominalType, symTable.anydataType.tsymbol.pkgID, symTable.anydataType.tsymbol.owner) : anydataNominalType; case TypeTags.RECORD: int pkgCpIndex = inputStream.readInt(); PackageID pkgId = getPackageId(pkgCpIndex); String recordName = getStringCPEntryValue(inputStream); BRecordTypeSymbol recordSymbol = Symbols.createRecordSymbol(Flags.asMask(EnumSet.of(Flag.PUBLIC)), names.fromString(recordName), env.pkgSymbol.pkgID, null, env.pkgSymbol, symTable.builtinPos, COMPILED_SOURCE); recordSymbol.scope = new Scope(recordSymbol); BRecordType recordType = new BRecordType(recordSymbol, flags); recordSymbol.type = recordType; compositeStack.push(recordType); addShapeCP(recordType, cpI); recordType.sealed = inputStream.readBoolean(); recordType.restFieldType = readTypeFromCp(); int recordFields = inputStream.readInt(); for (int i = 0; i < recordFields; i++) { String fieldName = getStringCPEntryValue(inputStream); var fieldFlags = inputStream.readLong(); byte[] docBytes = readDocBytes(inputStream); BType fieldType = readTypeFromCp(); BVarSymbol varSymbol = new BVarSymbol(fieldFlags, names.fromString(fieldName), recordSymbol.pkgID, fieldType, recordSymbol.scope.owner, symTable.builtinPos, COMPILED_SOURCE); defineAnnotAttachmentSymbols(inputStream, varSymbol); defineMarkDownDocAttachment(varSymbol, docBytes); BField structField = new BField(varSymbol.name, varSymbol.pos, varSymbol); recordType.fields.put(structField.name.value, structField); recordSymbol.scope.define(varSymbol.name, varSymbol); } boolean isInitAvailable = inputStream.readByte() == 1; if (isInitAvailable) { String recordInitFuncName = getStringCPEntryValue(inputStream); var recordInitFuncFlags = inputStream.readLong(); BInvokableType recordInitFuncType = (BInvokableType) readTypeFromCp(); Name initFuncName = names.fromString(recordInitFuncName); boolean isNative = Symbols.isFlagOn(recordInitFuncFlags, Flags.NATIVE); BInvokableSymbol recordInitFuncSymbol = 
Symbols.createFunctionSymbol(recordInitFuncFlags, initFuncName, initFuncName, env.pkgSymbol.pkgID, recordInitFuncType, env.pkgSymbol, isNative, symTable.builtinPos, COMPILED_SOURCE); recordInitFuncSymbol.retType = recordInitFuncType.retType; recordSymbol.initializerFunc = new BAttachedFunction(initFuncName, recordInitFuncSymbol, recordInitFuncType, symTable.builtinPos); recordSymbol.scope.define(initFuncName, recordInitFuncSymbol); } recordType.typeInclusions = readTypeInclusions(); Object poppedRecordType = compositeStack.pop(); assert poppedRecordType == recordType; if (pkgId.equals(env.pkgSymbol.pkgID)) { return recordType; } SymbolEnv pkgEnv = symTable.pkgEnvMap.get(packageCache.getSymbol(pkgId)); return getType(recordType, pkgEnv, names.fromString(recordName)); case TypeTags.TYPEDESC: BTypedescType typedescType = new BTypedescType(null, symTable.typeDesc.tsymbol); typedescType.constraint = readTypeFromCp(); typedescType.flags = flags; return typedescType; case TypeTags.TYPEREFDESC: int pkgIndex = inputStream.readInt(); PackageID pkg = getPackageId(pkgIndex); BPackageSymbol pkgSymbol = pkg.equals(env.pkgSymbol.pkgID) ? env.pkgSymbol : packageCache.getSymbol(pkg); String typeDefName = getStringCPEntryValue(inputStream); BTypeSymbol typeSymbol = Symbols.createTypeSymbol(SymTag.TYPE_REF, Flags.asMask(EnumSet.of(Flag.PUBLIC)), names.fromString(typeDefName), pkg, null, pkgSymbol, symTable.builtinPos, COMPILED_SOURCE); boolean nullable = (typeFlags & TypeFlags.NILABLE) == TypeFlags.NILABLE; BTypeReferenceType typeReferenceType = new BTypeReferenceType(null, typeSymbol, flags, nullable); addShapeCP(typeReferenceType, cpI); compositeStack.push(typeReferenceType); typeReferenceType.referredType = readTypeFromCp(); Object poppedRefType = compositeStack.pop(); assert poppedRefType == typeReferenceType; return typeReferenceType; case TypeTags.PARAMETERIZED_TYPE: BParameterizedType type = new BParameterizedType(null, null, null, name, -1); type.paramValueType = readTypeFromCp(); type.flags = flags; type.paramIndex = inputStream.readInt(); return type; case TypeTags.STREAM: BStreamType bStreamType = new BStreamType(TypeTags.STREAM, null, null, symTable.streamType.tsymbol); bStreamType.constraint = readTypeFromCp(); bStreamType.completionType = readTypeFromCp(); bStreamType.flags = flags; return bStreamType; case TypeTags.TABLE: BTableType bTableType = new BTableType(TypeTags.TABLE, null, symTable.tableType.tsymbol, flags); bTableType.constraint = readTypeFromCp(); boolean hasFieldNameList = inputStream.readByte() == 1; if (hasFieldNameList) { int fieldNameListSize = inputStream.readInt(); bTableType.fieldNameList = new ArrayList<>(fieldNameListSize); for (int i = 0; i < fieldNameListSize; i++) { String fieldName = getStringCPEntryValue(inputStream); bTableType.fieldNameList.add(fieldName); } } boolean hasKeyConstraint = inputStream.readByte() == 1; if (hasKeyConstraint) { bTableType.keyTypeConstraint = readTypeFromCp(); if (bTableType.keyTypeConstraint.tsymbol == null) { bTableType.keyTypeConstraint.tsymbol = Symbols.createTypeSymbol(SymTag.TYPE, Flags.asMask(EnumSet.of(Flag.PUBLIC)), Names.EMPTY, env.pkgSymbol.pkgID, bTableType.keyTypeConstraint, env.pkgSymbol.owner, symTable.builtinPos, COMPILED_SOURCE); } } return bTableType; case TypeTags.MAP: BMapType bMapType = new BMapType(TypeTags.MAP, null, symTable.mapType.tsymbol, flags); bMapType.constraint = readTypeFromCp(); return bMapType; case TypeTags.INVOKABLE: BInvokableType bInvokableType = new BInvokableType(null, null, null, null); 
bInvokableType.tsymbol = Symbols.createInvokableTypeSymbol(SymTag.FUNCTION_TYPE, flags, env.pkgSymbol.pkgID, null, env.pkgSymbol.owner, symTable.builtinPos, COMPILED_SOURCE); bInvokableType.flags = flags; if (inputStream.readBoolean()) { return bInvokableType; } int paramCount = inputStream.readInt(); List<BType> paramTypes = new ArrayList<>(paramCount); for (int i = 0; i < paramCount; i++) { paramTypes.add(readTypeFromCp()); } bInvokableType.paramTypes = paramTypes; if (inputStream.readBoolean()) { bInvokableType.restType = readTypeFromCp(); } bInvokableType.retType = readTypeFromCp(); return setTSymbolForInvokableType(bInvokableType, bInvokableType.retType); case TypeTags.ANY: BType anyNominalType = typeParamAnalyzer.getNominalType(symTable.anyType, name, flags); return isImmutable(flags) ? getEffectiveImmutableType(anyNominalType, symTable.anyType.tsymbol.pkgID, symTable.anyType.tsymbol.owner) : anyNominalType; case TypeTags.HANDLE: return symTable.handleType; case TypeTags.READONLY: return symTable.readonlyType; case TypeTags.ENDPOINT: break; case TypeTags.ARRAY: byte state = inputStream.readByte(); int size = inputStream.readInt(); BTypeSymbol arrayTypeSymbol = Symbols.createTypeSymbol(SymTag.ARRAY_TYPE, Flags.asMask(EnumSet.of(Flag.PUBLIC)), Names.EMPTY, env.pkgSymbol.pkgID, null, env.pkgSymbol.owner, symTable.builtinPos, COMPILED_SOURCE); BArrayType bArrayType = new BArrayType(null, arrayTypeSymbol, size, BArrayState.valueOf(state), flags); bArrayType.eType = readTypeFromCp(); return bArrayType; case TypeTags.UNION: boolean isCyclic = inputStream.readByte() == 1; boolean hasName = inputStream.readByte() == 1; PackageID unionsPkgId = env.pkgSymbol.pkgID; Name unionName = Names.EMPTY; if (hasName) { pkgCpIndex = inputStream.readInt(); unionsPkgId = getPackageId(pkgCpIndex); String unionNameStr = getStringCPEntryValue(inputStream); unionName = names.fromString(unionNameStr); } BTypeSymbol unionTypeSymbol = Symbols.createTypeSymbol(SymTag.UNION_TYPE, Flags.asMask(EnumSet.of(Flag.PUBLIC)), unionName, unionsPkgId, null, env.pkgSymbol, symTable.builtinPos, COMPILED_SOURCE); int unionMemberCount = inputStream.readInt(); BUnionType unionType = BUnionType.create(unionTypeSymbol, new LinkedHashSet<>(unionMemberCount)); unionType.name = unionName; addShapeCP(unionType, cpI); compositeStack.push(unionType); unionType.flags = flags; unionType.isCyclic = isCyclic; for (int i = 0; i < unionMemberCount; i++) { unionType.add(readTypeFromCp()); } int unionOriginalMemberCount = inputStream.readInt(); LinkedHashSet<BType> originalMemberTypes = new LinkedHashSet<>(unionOriginalMemberCount); for (int i = 0; i < unionOriginalMemberCount; i++) { originalMemberTypes.add(readTypeFromCp()); } unionType.setOriginalMemberTypes(originalMemberTypes); var poppedUnionType = compositeStack.pop(); assert poppedUnionType == unionType; boolean isEnum = inputStream.readBoolean(); if (isEnum) { readAndSetEnumSymbol(unionType, flags); } if (hasName) { if (unionsPkgId.equals(env.pkgSymbol.pkgID)) { return unionType; } else { pkgEnv = symTable.pkgEnvMap.get(packageCache.getSymbol(unionsPkgId)); if (pkgEnv != null) { BType existingUnionType = getType(unionType, pkgEnv, unionName); if (existingUnionType != symTable.noType) { return existingUnionType; } } } } return unionType; case TypeTags.INTERSECTION: BTypeSymbol intersectionTypeSymbol = Symbols.createTypeSymbol(SymTag.INTERSECTION_TYPE, Flags.asMask(EnumSet.of(Flag.PUBLIC)), Names.EMPTY, env.pkgSymbol.pkgID, null, env.pkgSymbol, symTable.builtinPos, COMPILED_SOURCE); int 
intersectionMemberCount = inputStream.readInt(); LinkedHashSet<BType> constituentTypes = new LinkedHashSet<>(intersectionMemberCount); for (int i = 0; i < intersectionMemberCount; i++) { constituentTypes.add(readTypeFromCp()); } IntersectableReferenceType effectiveType = (IntersectableReferenceType) readTypeFromCp(); return new BIntersectionType(intersectionTypeSymbol, constituentTypes, effectiveType, flags); case TypeTags.PACKAGE: break; case TypeTags.NONE: return symTable.noType; case TypeTags.VOID: break; case TypeTags.XMLNS: break; case TypeTags.ANNOTATION: break; case TypeTags.SEMANTIC_ERROR: break; case TypeTags.ERROR: pkgCpIndex = inputStream.readInt(); pkgId = getPackageId(pkgCpIndex); BPackageSymbol owner = packageCache.getSymbol(pkgId); BTypeSymbol errorSymbol; if (owner != null) { errorSymbol = new BErrorTypeSymbol(SymTag.ERROR, Flags.PUBLIC, Names.EMPTY, owner.pkgID, null, owner, symTable.builtinPos, COMPILED_SOURCE); } else { errorSymbol = new BErrorTypeSymbol(SymTag.ERROR, Flags.PUBLIC, Names.EMPTY, env.pkgSymbol.pkgID, null, env.pkgSymbol, symTable.builtinPos, COMPILED_SOURCE); } BErrorType errorType = new BErrorType(errorSymbol); addShapeCP(errorType, cpI); compositeStack.push(errorType); String errorName = getStringCPEntryValue(inputStream); BType detailsType = readTypeFromCp(); errorType.detailType = detailsType; errorType.flags = flags; errorSymbol.type = errorType; errorSymbol.pkgID = pkgId; errorSymbol.originalName = errorSymbol.name = names.fromString(errorName); Object poppedErrorType = compositeStack.pop(); assert poppedErrorType == errorType; if (!env.pkgSymbol.pkgID.equals(PackageID.ANNOTATIONS) && Symbols.isFlagOn(flags, Flags.NATIVE)) { return symTable.errorType; } errorType.typeIdSet = readTypeIdSet(inputStream); return errorType; case TypeTags.ITERATOR: break; case TypeTags.TUPLE: BTypeSymbol tupleTypeSymbol = Symbols.createTypeSymbol(SymTag.TUPLE_TYPE, Flags.asMask(EnumSet.of(Flag.PUBLIC)), Names.EMPTY, env.pkgSymbol.pkgID, null, env.pkgSymbol.owner, symTable.builtinPos, COMPILED_SOURCE); int tupleMemberCount = inputStream.readInt(); List<BTupleMember> tupleMembers = new ArrayList<>(tupleMemberCount); BSymbol tupleOwner = tupleTypeSymbol.owner; PackageID tuplePkg = tupleTypeSymbol.pkgID; for (int i = 0; i < tupleMemberCount; i++) { String index = getStringCPEntryValue(inputStream); long fieldFlags = inputStream.readLong(); BType memberType = readTypeFromCp(); BVarSymbol varSymbol = new BVarSymbol(fieldFlags, names.fromString(index), tuplePkg, memberType, tupleOwner, symTable.builtinPos, COMPILED_SOURCE); defineAnnotAttachmentSymbols(inputStream, varSymbol); tupleMembers.add(new BTupleMember(memberType, varSymbol)); } BTupleType bTupleType = new BTupleType(tupleTypeSymbol, tupleMembers); bTupleType.flags = flags; if (inputStream.readBoolean()) { bTupleType.restType = readTypeFromCp(); } return bTupleType; case TypeTags.FUTURE: BFutureType bFutureType = new BFutureType(TypeTags.FUTURE, null, symTable.futureType.tsymbol); bFutureType.constraint = readTypeFromCp(); bFutureType.flags = flags; return bFutureType; case TypeTags.FINITE: String finiteTypeName = getStringCPEntryValue(inputStream); var finiteTypeFlags = inputStream.readLong(); BTypeSymbol symbol = Symbols.createTypeSymbol(SymTag.FINITE_TYPE, finiteTypeFlags, names.fromString(finiteTypeName), env.pkgSymbol.pkgID, null, env.pkgSymbol, symTable.builtinPos, COMPILED_SOURCE); symbol.scope = new Scope(symbol); BFiniteType finiteType = new BFiniteType(symbol); finiteType.flags = flags; symbol.type = 
finiteType; int valueSpaceSize = inputStream.readInt(); for (int i = 0; i < valueSpaceSize; i++) { defineValueSpace(inputStream, finiteType, this); } return finiteType; case TypeTags.OBJECT: pkgCpIndex = inputStream.readInt(); pkgId = getPackageId(pkgCpIndex); String objName = getStringCPEntryValue(inputStream); long objSymFlags = inputStream.readLong(); BObjectTypeSymbol objectSymbol; if (Symbols.isFlagOn(objSymFlags, Flags.CLASS)) { objectSymbol = Symbols.createClassSymbol(objSymFlags, names.fromString(objName), env.pkgSymbol.pkgID, null, env.pkgSymbol, symTable.builtinPos, COMPILED_SOURCE, false); } else { objectSymbol = Symbols.createObjectSymbol(objSymFlags, names.fromString(objName), env.pkgSymbol.pkgID, null, env.pkgSymbol, symTable.builtinPos, COMPILED_SOURCE); } objectSymbol.scope = new Scope(objectSymbol); BObjectType objectType; objectType = new BObjectType(objectSymbol); objectType.flags = flags; objectSymbol.type = objectType; addShapeCP(objectType, cpI); compositeStack.push(objectType); int fieldCount = inputStream.readInt(); for (int i = 0; i < fieldCount; i++) { String fieldName = getStringCPEntryValue(inputStream); var fieldFlags = inputStream.readLong(); var defaultable = inputStream.readBoolean(); byte[] docBytes = readDocBytes(inputStream); BType fieldType = readTypeFromCp(); BVarSymbol objectVarSymbol = new BVarSymbol(fieldFlags, names.fromString(fieldName), objectSymbol.pkgID, fieldType, objectSymbol.scope.owner, symTable.builtinPos, COMPILED_SOURCE); objectVarSymbol.isDefaultable = defaultable; defineMarkDownDocAttachment(objectVarSymbol, docBytes); BField structField = new BField(objectVarSymbol.name, null, objectVarSymbol); objectType.fields.put(structField.name.value, structField); objectSymbol.scope.define(objectVarSymbol.name, objectVarSymbol); } boolean generatedConstructorPresent = inputStream.readBoolean(); if (generatedConstructorPresent) { ignoreAttachedFunc(); } boolean constructorPresent = inputStream.readBoolean(); if (constructorPresent) { ignoreAttachedFunc(); } int funcCount = inputStream.readInt(); boolean isImmutable = isImmutable(objectSymbol.flags); for (int i = 0; i < funcCount; i++) { if (isImmutable) { populateIntersectionTypeReferencedFunctions(inputStream, objectSymbol); } else { ignoreAttachedFunc(); } } objectType.typeInclusions = readTypeInclusions(); objectType.typeIdSet = readTypeIdSet(inputStream); Object poppedObjType = compositeStack.pop(); assert poppedObjType == objectType; if (pkgId.equals(env.pkgSymbol.pkgID)) { return objectType; } pkgEnv = symTable.pkgEnvMap.get(packageCache.getSymbol(pkgId)); return getType(objectType, pkgEnv, names.fromString(objName)); case TypeTags.BYTE_ARRAY: break; case TypeTags.FUNCTION_POINTER: break; case SERVICE_TYPE_TAG: throw new AssertionError(); case TypeTags.SIGNED32_INT: return symTable.signed32IntType; case TypeTags.SIGNED16_INT: return symTable.signed16IntType; case TypeTags.SIGNED8_INT: return symTable.signed8IntType; case TypeTags.UNSIGNED32_INT: return symTable.unsigned32IntType; case TypeTags.UNSIGNED16_INT: return symTable.unsigned16IntType; case TypeTags.UNSIGNED8_INT: return symTable.unsigned8IntType; case TypeTags.CHAR_STRING: return symTable.charStringType; case TypeTags.XML_ELEMENT: return isImmutable(flags) ? getEffectiveImmutableType(symTable.xmlElementType) : symTable.xmlElementType; case TypeTags.XML_PI: return isImmutable(flags) ? getEffectiveImmutableType(symTable.xmlPIType) : symTable.xmlPIType; case TypeTags.XML_COMMENT: return isImmutable(flags) ? 
getEffectiveImmutableType(symTable.xmlCommentType) : symTable.xmlCommentType; case TypeTags.XML_TEXT: return symTable.xmlTextType; case TypeTags.REGEXP: return symTable.regExpType; } return null; }
for (int i = 0; i < fieldCount; i++) {
public BType readType(int cpI) throws IOException { byte tag = inputStream.readByte(); Name name = names.fromString(getStringCPEntryValue(inputStream)); var flags = inputStream.readLong(); int typeFlags = inputStream.readInt(); switch (tag) { case TypeTags.INT: return typeParamAnalyzer.getNominalType(symTable.intType, name, flags); case TypeTags.BYTE: return typeParamAnalyzer.getNominalType(symTable.byteType, name, flags); case TypeTags.FLOAT: return typeParamAnalyzer.getNominalType(symTable.floatType, name, flags); case TypeTags.DECIMAL: return typeParamAnalyzer.getNominalType(symTable.decimalType, name, flags); case TypeTags.STRING: return typeParamAnalyzer.getNominalType(symTable.stringType, name, flags); case TypeTags.BOOLEAN: return typeParamAnalyzer.getNominalType(symTable.booleanType, name, flags); case TypeTags.JSON: return isImmutable(flags) ? getEffectiveImmutableType(symTable.jsonType) : symTable.jsonType; case TypeTags.XML: BType constraintType = readTypeFromCp(); BXMLType mutableXmlType = new BXMLType(constraintType, symTable.xmlType.tsymbol); if (Symbols.isFlagOn(flags, Flags.PARAMETERIZED)) { mutableXmlType.flags |= Flags.PARAMETERIZED; } return isImmutable(flags) ? getEffectiveImmutableType(mutableXmlType) : mutableXmlType; case TypeTags.NIL: return symTable.nilType; case TypeTags.NEVER: return symTable.neverType; case TypeTags.ANYDATA: if (name.getValue().equals(Names.ANYDATA.getValue())) { name = Names.EMPTY; } BType anydataNominalType = typeParamAnalyzer.getNominalType(symTable.anydataType, name, flags); return isImmutable(flags) ? getEffectiveImmutableType(anydataNominalType, symTable.anydataType.tsymbol.pkgID, symTable.anydataType.tsymbol.owner) : anydataNominalType; case TypeTags.RECORD: int pkgCpIndex = inputStream.readInt(); PackageID pkgId = getPackageId(pkgCpIndex); String recordName = getStringCPEntryValue(inputStream); BRecordTypeSymbol recordSymbol = Symbols.createRecordSymbol(Flags.asMask(EnumSet.of(Flag.PUBLIC)), names.fromString(recordName), env.pkgSymbol.pkgID, null, env.pkgSymbol, symTable.builtinPos, COMPILED_SOURCE); recordSymbol.scope = new Scope(recordSymbol); BRecordType recordType = new BRecordType(recordSymbol, flags); recordSymbol.type = recordType; compositeStack.push(recordType); addShapeCP(recordType, cpI); recordType.sealed = inputStream.readBoolean(); recordType.restFieldType = readTypeFromCp(); int recordFields = inputStream.readInt(); for (int i = 0; i < recordFields; i++) { String fieldName = getStringCPEntryValue(inputStream); var fieldFlags = inputStream.readLong(); byte[] docBytes = readDocBytes(inputStream); BType fieldType = readTypeFromCp(); BVarSymbol varSymbol = new BVarSymbol(fieldFlags, names.fromString(fieldName), recordSymbol.pkgID, fieldType, recordSymbol.scope.owner, symTable.builtinPos, COMPILED_SOURCE); defineAnnotAttachmentSymbols(inputStream, varSymbol); defineMarkDownDocAttachment(varSymbol, docBytes); BField structField = new BField(varSymbol.name, varSymbol.pos, varSymbol); recordType.fields.put(structField.name.value, structField); recordSymbol.scope.define(varSymbol.name, varSymbol); } boolean isInitAvailable = inputStream.readByte() == 1; if (isInitAvailable) { String recordInitFuncName = getStringCPEntryValue(inputStream); var recordInitFuncFlags = inputStream.readLong(); BInvokableType recordInitFuncType = (BInvokableType) readTypeFromCp(); Name initFuncName = names.fromString(recordInitFuncName); boolean isNative = Symbols.isFlagOn(recordInitFuncFlags, Flags.NATIVE); BInvokableSymbol recordInitFuncSymbol = 
Symbols.createFunctionSymbol(recordInitFuncFlags, initFuncName, initFuncName, env.pkgSymbol.pkgID, recordInitFuncType, env.pkgSymbol, isNative, symTable.builtinPos, COMPILED_SOURCE); recordInitFuncSymbol.retType = recordInitFuncType.retType; recordSymbol.initializerFunc = new BAttachedFunction(initFuncName, recordInitFuncSymbol, recordInitFuncType, symTable.builtinPos); recordSymbol.scope.define(initFuncName, recordInitFuncSymbol); } recordType.typeInclusions = readTypeInclusions(); Object poppedRecordType = compositeStack.pop(); assert poppedRecordType == recordType; if (pkgId.equals(env.pkgSymbol.pkgID)) { return recordType; } SymbolEnv pkgEnv = symTable.pkgEnvMap.get(packageCache.getSymbol(pkgId)); return getType(recordType, pkgEnv, names.fromString(recordName)); case TypeTags.TYPEDESC: BTypedescType typedescType = new BTypedescType(null, symTable.typeDesc.tsymbol); typedescType.constraint = readTypeFromCp(); typedescType.flags = flags; return typedescType; case TypeTags.TYPEREFDESC: int pkgIndex = inputStream.readInt(); PackageID pkg = getPackageId(pkgIndex); BPackageSymbol pkgSymbol = pkg.equals(env.pkgSymbol.pkgID) ? env.pkgSymbol : packageCache.getSymbol(pkg); String typeDefName = getStringCPEntryValue(inputStream); BTypeSymbol typeSymbol = Symbols.createTypeSymbol(SymTag.TYPE_REF, Flags.asMask(EnumSet.of(Flag.PUBLIC)), names.fromString(typeDefName), pkg, null, pkgSymbol, symTable.builtinPos, COMPILED_SOURCE); boolean nullable = (typeFlags & TypeFlags.NILABLE) == TypeFlags.NILABLE; BTypeReferenceType typeReferenceType = new BTypeReferenceType(null, typeSymbol, flags, nullable); addShapeCP(typeReferenceType, cpI); compositeStack.push(typeReferenceType); typeReferenceType.referredType = readTypeFromCp(); Object poppedRefType = compositeStack.pop(); assert poppedRefType == typeReferenceType; return typeReferenceType; case TypeTags.PARAMETERIZED_TYPE: BParameterizedType type = new BParameterizedType(null, null, null, name, -1); type.paramValueType = readTypeFromCp(); type.flags = flags; type.paramIndex = inputStream.readInt(); return type; case TypeTags.STREAM: BStreamType bStreamType = new BStreamType(TypeTags.STREAM, null, null, symTable.streamType.tsymbol); bStreamType.constraint = readTypeFromCp(); bStreamType.completionType = readTypeFromCp(); bStreamType.flags = flags; return bStreamType; case TypeTags.TABLE: BTableType bTableType = new BTableType(TypeTags.TABLE, null, symTable.tableType.tsymbol, flags); bTableType.constraint = readTypeFromCp(); boolean hasFieldNameList = inputStream.readByte() == 1; if (hasFieldNameList) { int fieldNameListSize = inputStream.readInt(); bTableType.fieldNameList = new ArrayList<>(fieldNameListSize); for (int i = 0; i < fieldNameListSize; i++) { String fieldName = getStringCPEntryValue(inputStream); bTableType.fieldNameList.add(fieldName); } } boolean hasKeyConstraint = inputStream.readByte() == 1; if (hasKeyConstraint) { bTableType.keyTypeConstraint = readTypeFromCp(); if (bTableType.keyTypeConstraint.tsymbol == null) { bTableType.keyTypeConstraint.tsymbol = Symbols.createTypeSymbol(SymTag.TYPE, Flags.asMask(EnumSet.of(Flag.PUBLIC)), Names.EMPTY, env.pkgSymbol.pkgID, bTableType.keyTypeConstraint, env.pkgSymbol.owner, symTable.builtinPos, COMPILED_SOURCE); } } return bTableType; case TypeTags.MAP: BMapType bMapType = new BMapType(TypeTags.MAP, null, symTable.mapType.tsymbol, flags); bMapType.constraint = readTypeFromCp(); return bMapType; case TypeTags.INVOKABLE: BInvokableType bInvokableType = new BInvokableType(null, null, null, null); 
bInvokableType.tsymbol = Symbols.createInvokableTypeSymbol(SymTag.FUNCTION_TYPE, flags, env.pkgSymbol.pkgID, null, env.pkgSymbol.owner, symTable.builtinPos, COMPILED_SOURCE); bInvokableType.flags = flags; if (inputStream.readBoolean()) { return bInvokableType; } int paramCount = inputStream.readInt(); List<BType> paramTypes = new ArrayList<>(paramCount); for (int i = 0; i < paramCount; i++) { paramTypes.add(readTypeFromCp()); } bInvokableType.paramTypes = paramTypes; if (inputStream.readBoolean()) { bInvokableType.restType = readTypeFromCp(); } bInvokableType.retType = readTypeFromCp(); return setTSymbolForInvokableType(bInvokableType, bInvokableType.retType); case TypeTags.ANY: BType anyNominalType = typeParamAnalyzer.getNominalType(symTable.anyType, name, flags); return isImmutable(flags) ? getEffectiveImmutableType(anyNominalType, symTable.anyType.tsymbol.pkgID, symTable.anyType.tsymbol.owner) : anyNominalType; case TypeTags.HANDLE: return symTable.handleType; case TypeTags.READONLY: return symTable.readonlyType; case TypeTags.ENDPOINT: break; case TypeTags.ARRAY: byte state = inputStream.readByte(); int size = inputStream.readInt(); BTypeSymbol arrayTypeSymbol = Symbols.createTypeSymbol(SymTag.ARRAY_TYPE, Flags.asMask(EnumSet.of(Flag.PUBLIC)), Names.EMPTY, env.pkgSymbol.pkgID, null, env.pkgSymbol.owner, symTable.builtinPos, COMPILED_SOURCE); BArrayType bArrayType = new BArrayType(null, arrayTypeSymbol, size, BArrayState.valueOf(state), flags); bArrayType.eType = readTypeFromCp(); return bArrayType; case TypeTags.UNION: boolean isCyclic = inputStream.readByte() == 1; boolean hasName = inputStream.readByte() == 1; PackageID unionsPkgId = env.pkgSymbol.pkgID; Name unionName = Names.EMPTY; if (hasName) { pkgCpIndex = inputStream.readInt(); unionsPkgId = getPackageId(pkgCpIndex); String unionNameStr = getStringCPEntryValue(inputStream); unionName = names.fromString(unionNameStr); } BTypeSymbol unionTypeSymbol = Symbols.createTypeSymbol(SymTag.UNION_TYPE, Flags.asMask(EnumSet.of(Flag.PUBLIC)), unionName, unionsPkgId, null, env.pkgSymbol, symTable.builtinPos, COMPILED_SOURCE); int unionMemberCount = inputStream.readInt(); BUnionType unionType = BUnionType.create(unionTypeSymbol, new LinkedHashSet<>(unionMemberCount)); unionType.name = unionName; addShapeCP(unionType, cpI); compositeStack.push(unionType); unionType.flags = flags; unionType.isCyclic = isCyclic; for (int i = 0; i < unionMemberCount; i++) { unionType.add(readTypeFromCp()); } int unionOriginalMemberCount = inputStream.readInt(); LinkedHashSet<BType> originalMemberTypes = new LinkedHashSet<>(unionOriginalMemberCount); for (int i = 0; i < unionOriginalMemberCount; i++) { originalMemberTypes.add(readTypeFromCp()); } unionType.setOriginalMemberTypes(originalMemberTypes); var poppedUnionType = compositeStack.pop(); assert poppedUnionType == unionType; boolean isEnum = inputStream.readBoolean(); if (isEnum) { readAndSetEnumSymbol(unionType, flags); } if (hasName) { if (unionsPkgId.equals(env.pkgSymbol.pkgID)) { return unionType; } else { pkgEnv = symTable.pkgEnvMap.get(packageCache.getSymbol(unionsPkgId)); if (pkgEnv != null) { BType existingUnionType = getType(unionType, pkgEnv, unionName); if (existingUnionType != symTable.noType) { return existingUnionType; } } } } return unionType; case TypeTags.INTERSECTION: BTypeSymbol intersectionTypeSymbol = Symbols.createTypeSymbol(SymTag.INTERSECTION_TYPE, Flags.asMask(EnumSet.of(Flag.PUBLIC)), Names.EMPTY, env.pkgSymbol.pkgID, null, env.pkgSymbol, symTable.builtinPos, COMPILED_SOURCE); int 
intersectionMemberCount = inputStream.readInt(); LinkedHashSet<BType> constituentTypes = new LinkedHashSet<>(intersectionMemberCount); for (int i = 0; i < intersectionMemberCount; i++) { constituentTypes.add(readTypeFromCp()); } IntersectableReferenceType effectiveType = (IntersectableReferenceType) readTypeFromCp(); return new BIntersectionType(intersectionTypeSymbol, constituentTypes, effectiveType, flags); case TypeTags.PACKAGE: break; case TypeTags.NONE: return symTable.noType; case TypeTags.VOID: break; case TypeTags.XMLNS: break; case TypeTags.ANNOTATION: break; case TypeTags.SEMANTIC_ERROR: break; case TypeTags.ERROR: pkgCpIndex = inputStream.readInt(); pkgId = getPackageId(pkgCpIndex); BPackageSymbol owner = packageCache.getSymbol(pkgId); BTypeSymbol errorSymbol; if (owner != null) { errorSymbol = new BErrorTypeSymbol(SymTag.ERROR, Flags.PUBLIC, Names.EMPTY, owner.pkgID, null, owner, symTable.builtinPos, COMPILED_SOURCE); } else { errorSymbol = new BErrorTypeSymbol(SymTag.ERROR, Flags.PUBLIC, Names.EMPTY, env.pkgSymbol.pkgID, null, env.pkgSymbol, symTable.builtinPos, COMPILED_SOURCE); } BErrorType errorType = new BErrorType(errorSymbol); addShapeCP(errorType, cpI); compositeStack.push(errorType); String errorName = getStringCPEntryValue(inputStream); BType detailsType = readTypeFromCp(); errorType.detailType = detailsType; errorType.flags = flags; errorSymbol.type = errorType; errorSymbol.pkgID = pkgId; errorSymbol.originalName = errorSymbol.name = names.fromString(errorName); Object poppedErrorType = compositeStack.pop(); assert poppedErrorType == errorType; if (!env.pkgSymbol.pkgID.equals(PackageID.ANNOTATIONS) && Symbols.isFlagOn(flags, Flags.NATIVE)) { return symTable.errorType; } errorType.typeIdSet = readTypeIdSet(inputStream); return errorType; case TypeTags.ITERATOR: break; case TypeTags.TUPLE: BTypeSymbol tupleTypeSymbol = Symbols.createTypeSymbol(SymTag.TUPLE_TYPE, Flags.asMask(EnumSet.of(Flag.PUBLIC)), Names.EMPTY, env.pkgSymbol.pkgID, null, env.pkgSymbol.owner, symTable.builtinPos, COMPILED_SOURCE); int tupleMemberCount = inputStream.readInt(); List<BTupleMember> tupleMembers = new ArrayList<>(tupleMemberCount); BSymbol tupleOwner = tupleTypeSymbol.owner; PackageID tuplePkg = tupleTypeSymbol.pkgID; for (int i = 0; i < tupleMemberCount; i++) { String index = getStringCPEntryValue(inputStream); long fieldFlags = inputStream.readLong(); BType memberType = readTypeFromCp(); BVarSymbol varSymbol = new BVarSymbol(fieldFlags, names.fromString(index), tuplePkg, memberType, tupleOwner, symTable.builtinPos, COMPILED_SOURCE); defineAnnotAttachmentSymbols(inputStream, varSymbol); tupleMembers.add(new BTupleMember(memberType, varSymbol)); } BTupleType bTupleType = new BTupleType(tupleTypeSymbol, tupleMembers); bTupleType.flags = flags; if (inputStream.readBoolean()) { bTupleType.restType = readTypeFromCp(); } return bTupleType; case TypeTags.FUTURE: BFutureType bFutureType = new BFutureType(TypeTags.FUTURE, null, symTable.futureType.tsymbol); bFutureType.constraint = readTypeFromCp(); bFutureType.flags = flags; return bFutureType; case TypeTags.FINITE: String finiteTypeName = getStringCPEntryValue(inputStream); var finiteTypeFlags = inputStream.readLong(); BTypeSymbol symbol = Symbols.createTypeSymbol(SymTag.FINITE_TYPE, finiteTypeFlags, names.fromString(finiteTypeName), env.pkgSymbol.pkgID, null, env.pkgSymbol, symTable.builtinPos, COMPILED_SOURCE); symbol.scope = new Scope(symbol); BFiniteType finiteType = new BFiniteType(symbol); finiteType.flags = flags; symbol.type = 
finiteType; int valueSpaceSize = inputStream.readInt(); for (int i = 0; i < valueSpaceSize; i++) { defineValueSpace(inputStream, finiteType, this); } return finiteType; case TypeTags.OBJECT: pkgCpIndex = inputStream.readInt(); pkgId = getPackageId(pkgCpIndex); String objName = getStringCPEntryValue(inputStream); long objSymFlags = inputStream.readLong(); BObjectTypeSymbol objectSymbol; if (Symbols.isFlagOn(objSymFlags, Flags.CLASS)) { objectSymbol = Symbols.createClassSymbol(objSymFlags, names.fromString(objName), env.pkgSymbol.pkgID, null, env.pkgSymbol, symTable.builtinPos, COMPILED_SOURCE, false); } else { objectSymbol = Symbols.createObjectSymbol(objSymFlags, names.fromString(objName), env.pkgSymbol.pkgID, null, env.pkgSymbol, symTable.builtinPos, COMPILED_SOURCE); } objectSymbol.scope = new Scope(objectSymbol); BObjectType objectType; objectType = new BObjectType(objectSymbol); objectType.flags = flags; objectSymbol.type = objectType; addShapeCP(objectType, cpI); compositeStack.push(objectType); int fieldCount = inputStream.readInt(); for (int i = 0; i < fieldCount; i++) { String fieldName = getStringCPEntryValue(inputStream); var fieldFlags = inputStream.readLong(); var defaultable = inputStream.readBoolean(); byte[] docBytes = readDocBytes(inputStream); BType fieldType = readTypeFromCp(); BVarSymbol objectVarSymbol = new BVarSymbol(fieldFlags, names.fromString(fieldName), objectSymbol.pkgID, fieldType, objectSymbol.scope.owner, symTable.builtinPos, COMPILED_SOURCE); objectVarSymbol.isDefaultable = defaultable; defineMarkDownDocAttachment(objectVarSymbol, docBytes); BField structField = new BField(objectVarSymbol.name, null, objectVarSymbol); objectType.fields.put(structField.name.value, structField); objectSymbol.scope.define(objectVarSymbol.name, objectVarSymbol); } boolean generatedConstructorPresent = inputStream.readBoolean(); if (generatedConstructorPresent) { ignoreAttachedFunc(); } boolean constructorPresent = inputStream.readBoolean(); if (constructorPresent) { ignoreAttachedFunc(); } int funcCount = inputStream.readInt(); boolean isImmutable = isImmutable(objectSymbol.flags); for (int i = 0; i < funcCount; i++) { if (isImmutable) { populateIntersectionTypeReferencedFunctions(inputStream, objectSymbol); } else { ignoreAttachedFunc(); } } objectType.typeInclusions = readTypeInclusions(); objectType.typeIdSet = readTypeIdSet(inputStream); Object poppedObjType = compositeStack.pop(); assert poppedObjType == objectType; if (pkgId.equals(env.pkgSymbol.pkgID)) { return objectType; } pkgEnv = symTable.pkgEnvMap.get(packageCache.getSymbol(pkgId)); return getType(objectType, pkgEnv, names.fromString(objName)); case TypeTags.BYTE_ARRAY: break; case TypeTags.FUNCTION_POINTER: break; case SERVICE_TYPE_TAG: throw new AssertionError(); case TypeTags.SIGNED32_INT: return symTable.signed32IntType; case TypeTags.SIGNED16_INT: return symTable.signed16IntType; case TypeTags.SIGNED8_INT: return symTable.signed8IntType; case TypeTags.UNSIGNED32_INT: return symTable.unsigned32IntType; case TypeTags.UNSIGNED16_INT: return symTable.unsigned16IntType; case TypeTags.UNSIGNED8_INT: return symTable.unsigned8IntType; case TypeTags.CHAR_STRING: return symTable.charStringType; case TypeTags.XML_ELEMENT: return isImmutable(flags) ? getEffectiveImmutableType(symTable.xmlElementType) : symTable.xmlElementType; case TypeTags.XML_PI: return isImmutable(flags) ? getEffectiveImmutableType(symTable.xmlPIType) : symTable.xmlPIType; case TypeTags.XML_COMMENT: return isImmutable(flags) ? 
getEffectiveImmutableType(symTable.xmlCommentType) : symTable.xmlCommentType; case TypeTags.XML_TEXT: return symTable.xmlTextType; case TypeTags.REGEXP: return symTable.regExpType; } return null; }
class BIRTypeReader { private DataInputStream inputStream; public BIRTypeReader(DataInputStream inputStream) { this.inputStream = inputStream; } private BType readTypeFromCp() throws IOException { return readBType(inputStream); } private BInvokableType setTSymbolForInvokableType(BInvokableType bInvokableType, BType retType) throws IOException { BInvokableTypeSymbol tSymbol = (BInvokableTypeSymbol) bInvokableType.tsymbol; boolean hasTSymbol = inputStream.readBoolean(); if (!hasTSymbol) { return bInvokableType; } int params = inputStream.readInt(); for (int i = 0; i < params; i++) { String paramName = getStringCPEntryValue(inputStream); var paramFlags = inputStream.readLong(); byte[] docBytes = readDocBytes(inputStream); BType fieldType = readTypeFromCp(); BVarSymbol varSymbol = new BVarSymbol(paramFlags, names.fromString(paramName), tSymbol.pkgID, fieldType, tSymbol, symTable.builtinPos, COMPILED_SOURCE); varSymbol.isDefaultable = ((paramFlags & Flags.OPTIONAL) == Flags.OPTIONAL); defineMarkDownDocAttachment(varSymbol, docBytes); tSymbol.params.add(varSymbol); } boolean hasRestParam = inputStream.readBoolean(); if (hasRestParam) { String fieldName = getStringCPEntryValue(inputStream); var fieldFlags = inputStream.readLong(); byte[] docBytes = readDocBytes(inputStream); BType fieldType = readTypeFromCp(); BVarSymbol varSymbol = new BVarSymbol(fieldFlags, names.fromString(fieldName), tSymbol.pkgID, fieldType, tSymbol, symTable.builtinPos, COMPILED_SOURCE); defineMarkDownDocAttachment(varSymbol, docBytes); tSymbol.restParam = varSymbol; } tSymbol.returnType = retType; int defaultValues = inputStream.readInt(); for (int i = 0; i < defaultValues; i++) { String paramName = getStringCPEntryValue(inputStream); BInvokableSymbol invokableSymbol = getSymbolOfClosure(); tSymbol.defaultValues.put(paramName, invokableSymbol); } return bInvokableType; } private BInvokableSymbol getSymbolOfClosure() throws IOException { String name = getStringCPEntryValue(inputStream); var flags = inputStream.readLong(); BType type = readTypeFromCp(); int pkgCpIndex = inputStream.readInt(); PackageID pkgId = getPackageId(pkgCpIndex); BInvokableSymbol invokableSymbol = Symbols.createFunctionSymbol(flags, Names.fromString(name), Names.fromString(name), pkgId, type, null, false, symTable.builtinPos, VIRTUAL); invokableSymbol.retType = invokableSymbol.type.getReturnType(); int parameters = inputStream.readInt(); for (int i = 0; i < parameters; i++) { String fieldName = getStringCPEntryValue(inputStream); var fieldFlags = inputStream.readLong(); byte[] docBytes = readDocBytes(inputStream); BType fieldType = readTypeFromCp(); BVarSymbol varSymbol = new BVarSymbol(fieldFlags, Names.fromString(fieldName), pkgId, fieldType, null, symTable.builtinPos, COMPILED_SOURCE); defineMarkDownDocAttachment(varSymbol, docBytes); invokableSymbol.params.add(varSymbol); } return invokableSymbol; } private BTypeIdSet readTypeIdSet(DataInputStream inputStream) throws IOException { Set<BTypeIdSet.BTypeId> primary = new HashSet<>(); int primaryTypeIdCount = inputStream.readInt(); for (int i = 0; i < primaryTypeIdCount; i++) { primary.add(readTypeId(inputStream)); } Set<BTypeIdSet.BTypeId> secondary = new HashSet<>(); int secondaryTypeIdCount = inputStream.readInt(); for (int i = 0; i < secondaryTypeIdCount; i++) { secondary.add(readTypeId(inputStream)); } return new BTypeIdSet(primary, secondary); } private BTypeIdSet.BTypeId readTypeId(DataInputStream inputStream) throws IOException { int pkgCPIndex = inputStream.readInt(); PackageID packageId = 
getPackageId(pkgCPIndex); String name = getStringCPEntryValue(inputStream); boolean isPublicTypeId = inputStream.readBoolean(); return new BTypeIdSet.BTypeId(packageId, name, isPublicTypeId); } private void ignoreAttachedFunc() throws IOException { getStringCPEntryValue(inputStream); getStringCPEntryValue(inputStream); inputStream.readLong(); readTypeFromCp(); } private List<BType> readTypeInclusions() throws IOException { int nTypeInclusions = inputStream.readInt(); List<BType> typeInclusions = new ArrayList<>(nTypeInclusions); for (int i = 0; i < nTypeInclusions; i++) { BType inclusion = readTypeFromCp(); typeInclusions.add(inclusion); } return typeInclusions; } private void readAndSetEnumSymbol(BUnionType unionType, long flags) throws IOException { PackageID enumPkgId = getPackageId(inputStream.readInt()); String enumName = getStringCPEntryValue(inputStream); int memberCount = inputStream.readInt(); BSymbol pkgSymbol = packageCache.getSymbol(enumPkgId); if (pkgSymbol == null) { pkgSymbol = env.pkgSymbol; } SymbolEnv enumPkgEnv = symTable.pkgEnvMap.get(pkgSymbol); if (enumPkgEnv == null) { enumPkgEnv = SymbolEnv.createPkgEnv(null, env.pkgSymbol.scope, null); } List<BConstantSymbol> members = new ArrayList<>(); for (int i = 0; i < memberCount; i++) { String memName = getStringCPEntryValue(inputStream); BSymbol sym = symbolResolver.lookupSymbolInMainSpace(enumPkgEnv, names.fromString(memName)); members.add((BConstantSymbol) sym); } unionType.tsymbol = new BEnumSymbol(members, flags, names.fromString(enumName), pkgSymbol.pkgID, unionType, pkgSymbol, symTable.builtinPos, COMPILED_SOURCE); } private void populateIntersectionTypeReferencedFunctions(DataInputStream inputStream, BObjectTypeSymbol objectSymbol) throws IOException { String attachedFuncName = getStringCPEntryValue(inputStream); String attachedFuncOrigName = getStringCPEntryValue(inputStream); var attachedFuncFlags = inputStream.readLong(); BInvokableType attachedFuncType = (BInvokableType) readTypeFromCp(); Name funcName = Names.fromString(Symbols.getAttachedFuncSymbolName( objectSymbol.name.value, attachedFuncName)); Name funcOrigName = Names.fromString(attachedFuncOrigName); BInvokableSymbol attachedFuncSymbol = Symbols.createFunctionSymbol(attachedFuncFlags, funcName, funcOrigName, env.pkgSymbol.pkgID, attachedFuncType, env.pkgSymbol, false, symTable.builtinPos, COMPILED_SOURCE); BAttachedFunction attachedFunction = new BAttachedFunction(Names.fromString(attachedFuncName), attachedFuncSymbol, attachedFuncType, symTable.builtinPos); setInvokableTypeSymbol(attachedFuncType); if (!Symbols.isFlagOn(attachedFuncType.flags, Flags.ANY_FUNCTION)) { BInvokableTypeSymbol tsymbol = (BInvokableTypeSymbol) attachedFuncType.tsymbol; attachedFuncSymbol.params = tsymbol.params; attachedFuncSymbol.restParam = tsymbol.restParam; attachedFuncSymbol.retType = tsymbol.returnType; } objectSymbol.referencedFunctions.add(attachedFunction); objectSymbol.attachedFuncs.add(attachedFunction); objectSymbol.scope.define(funcName, attachedFuncSymbol); } }
class BIRTypeReader { private DataInputStream inputStream; public BIRTypeReader(DataInputStream inputStream) { this.inputStream = inputStream; } private BType readTypeFromCp() throws IOException { return readBType(inputStream); } private BInvokableType setTSymbolForInvokableType(BInvokableType bInvokableType, BType retType) throws IOException { BInvokableTypeSymbol tSymbol = (BInvokableTypeSymbol) bInvokableType.tsymbol; boolean hasTSymbol = inputStream.readBoolean(); if (!hasTSymbol) { return bInvokableType; } int params = inputStream.readInt(); for (int i = 0; i < params; i++) { String paramName = getStringCPEntryValue(inputStream); var paramFlags = inputStream.readLong(); byte[] docBytes = readDocBytes(inputStream); BType fieldType = readTypeFromCp(); BVarSymbol varSymbol = new BVarSymbol(paramFlags, names.fromString(paramName), tSymbol.pkgID, fieldType, tSymbol, symTable.builtinPos, COMPILED_SOURCE); varSymbol.isDefaultable = ((paramFlags & Flags.OPTIONAL) == Flags.OPTIONAL); defineMarkDownDocAttachment(varSymbol, docBytes); tSymbol.params.add(varSymbol); } boolean hasRestParam = inputStream.readBoolean(); if (hasRestParam) { String fieldName = getStringCPEntryValue(inputStream); var fieldFlags = inputStream.readLong(); byte[] docBytes = readDocBytes(inputStream); BType fieldType = readTypeFromCp(); BVarSymbol varSymbol = new BVarSymbol(fieldFlags, names.fromString(fieldName), tSymbol.pkgID, fieldType, tSymbol, symTable.builtinPos, COMPILED_SOURCE); defineMarkDownDocAttachment(varSymbol, docBytes); tSymbol.restParam = varSymbol; } tSymbol.returnType = retType; int defaultValues = inputStream.readInt(); for (int i = 0; i < defaultValues; i++) { String paramName = getStringCPEntryValue(inputStream); BInvokableSymbol invokableSymbol = getSymbolOfClosure(); tSymbol.defaultValues.put(paramName, invokableSymbol); } return bInvokableType; } private BInvokableSymbol getSymbolOfClosure() throws IOException { String name = getStringCPEntryValue(inputStream); var flags = inputStream.readLong(); BType type = readTypeFromCp(); int pkgCpIndex = inputStream.readInt(); PackageID pkgId = getPackageId(pkgCpIndex); BInvokableSymbol invokableSymbol = Symbols.createFunctionSymbol(flags, Names.fromString(name), Names.fromString(name), pkgId, type, null, false, symTable.builtinPos, VIRTUAL); invokableSymbol.retType = invokableSymbol.type.getReturnType(); int parameters = inputStream.readInt(); for (int i = 0; i < parameters; i++) { String fieldName = getStringCPEntryValue(inputStream); var fieldFlags = inputStream.readLong(); byte[] docBytes = readDocBytes(inputStream); BType fieldType = readTypeFromCp(); BVarSymbol varSymbol = new BVarSymbol(fieldFlags, Names.fromString(fieldName), pkgId, fieldType, null, symTable.builtinPos, COMPILED_SOURCE); defineMarkDownDocAttachment(varSymbol, docBytes); invokableSymbol.params.add(varSymbol); } return invokableSymbol; } private BTypeIdSet readTypeIdSet(DataInputStream inputStream) throws IOException { Set<BTypeIdSet.BTypeId> primary = new HashSet<>(); int primaryTypeIdCount = inputStream.readInt(); for (int i = 0; i < primaryTypeIdCount; i++) { primary.add(readTypeId(inputStream)); } Set<BTypeIdSet.BTypeId> secondary = new HashSet<>(); int secondaryTypeIdCount = inputStream.readInt(); for (int i = 0; i < secondaryTypeIdCount; i++) { secondary.add(readTypeId(inputStream)); } return new BTypeIdSet(primary, secondary); } private BTypeIdSet.BTypeId readTypeId(DataInputStream inputStream) throws IOException { int pkgCPIndex = inputStream.readInt(); PackageID packageId = 
getPackageId(pkgCPIndex); String name = getStringCPEntryValue(inputStream); boolean isPublicTypeId = inputStream.readBoolean(); return new BTypeIdSet.BTypeId(packageId, name, isPublicTypeId); } private void ignoreAttachedFunc() throws IOException { getStringCPEntryValue(inputStream); getStringCPEntryValue(inputStream); inputStream.readLong(); readTypeFromCp(); } private List<BType> readTypeInclusions() throws IOException { int nTypeInclusions = inputStream.readInt(); List<BType> typeInclusions = new ArrayList<>(nTypeInclusions); for (int i = 0; i < nTypeInclusions; i++) { BType inclusion = readTypeFromCp(); typeInclusions.add(inclusion); } return typeInclusions; } private void readAndSetEnumSymbol(BUnionType unionType, long flags) throws IOException { PackageID enumPkgId = getPackageId(inputStream.readInt()); String enumName = getStringCPEntryValue(inputStream); int memberCount = inputStream.readInt(); BSymbol pkgSymbol = packageCache.getSymbol(enumPkgId); if (pkgSymbol == null) { pkgSymbol = env.pkgSymbol; } SymbolEnv enumPkgEnv = symTable.pkgEnvMap.get(pkgSymbol); if (enumPkgEnv == null) { enumPkgEnv = SymbolEnv.createPkgEnv(null, env.pkgSymbol.scope, null); } List<BConstantSymbol> members = new ArrayList<>(); for (int i = 0; i < memberCount; i++) { String memName = getStringCPEntryValue(inputStream); BSymbol sym = symbolResolver.lookupSymbolInMainSpace(enumPkgEnv, names.fromString(memName)); members.add((BConstantSymbol) sym); } unionType.tsymbol = new BEnumSymbol(members, flags, names.fromString(enumName), pkgSymbol.pkgID, unionType, pkgSymbol, symTable.builtinPos, COMPILED_SOURCE); } private void populateIntersectionTypeReferencedFunctions(DataInputStream inputStream, BObjectTypeSymbol objectSymbol) throws IOException { String attachedFuncName = getStringCPEntryValue(inputStream); String attachedFuncOrigName = getStringCPEntryValue(inputStream); var attachedFuncFlags = inputStream.readLong(); BInvokableType attachedFuncType = (BInvokableType) readTypeFromCp(); Name funcName = Names.fromString(Symbols.getAttachedFuncSymbolName( objectSymbol.name.value, attachedFuncName)); Name funcOrigName = Names.fromString(attachedFuncOrigName); BInvokableSymbol attachedFuncSymbol = Symbols.createFunctionSymbol(attachedFuncFlags, funcName, funcOrigName, env.pkgSymbol.pkgID, attachedFuncType, env.pkgSymbol, false, symTable.builtinPos, COMPILED_SOURCE); BAttachedFunction attachedFunction = new BAttachedFunction(Names.fromString(attachedFuncName), attachedFuncSymbol, attachedFuncType, symTable.builtinPos); setInvokableTypeSymbol(attachedFuncType); if (!Symbols.isFlagOn(attachedFuncType.flags, Flags.ANY_FUNCTION)) { BInvokableTypeSymbol tsymbol = (BInvokableTypeSymbol) attachedFuncType.tsymbol; attachedFuncSymbol.params = tsymbol.params; attachedFuncSymbol.restParam = tsymbol.restParam; attachedFuncSymbol.retType = tsymbol.returnType; } objectSymbol.referencedFunctions.add(attachedFunction); objectSymbol.attachedFuncs.add(attachedFunction); objectSymbol.scope.define(funcName, attachedFuncSymbol); } }
Please add a method on `Environment` instead of matching against the free-form, human-readable region string.
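A hedged sketch of what this asks for: encapsulate the cloud-provider check behind an explicit predicate so callers stop pattern-matching the region string. `getRegion()` exists in the code under review; the method name `isRunningOnAws()` and the constructor below are assumptions made for this sketch, not the project's actual API.

```java
// Hypothetical, simplified Environment exposing an explicit predicate
// instead of leaking the "aws-" region-name convention to callers.
public class Environment {
    private final String region;

    public Environment(String region) {
        this.region = region;
    }

    public String getRegion() {
        return region;
    }

    // Keeps the free-form region-name convention in one place.
    public boolean isRunningOnAws() {
        return region.startsWith("aws-");
    }
}
```

The caller in `createContainerData` would then read `if (environment.isRunningOnAws()) { ... }`, so a future change to the region-naming scheme touches only `Environment`.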
private void createContainerData(ContainerNodeSpec nodeSpec) { ContainerData containerData = ContainerData.createClean(environment, ContainerName.fromHostname(nodeSpec.hostname)); if (nodeSpec.nodeType.equals(NodeType.config.name())) { logger.info("Creating files needed by config server"); new ConfigServerContainerData(environment, nodeSpec.hostname).writeTo(containerData); } if (environment.getRegion().startsWith("aws-")) { logger.info("Creating files for message of the day and the bash prompt"); new MotdContainerData(nodeSpec, environment).writeTo(containerData); new PromptContainerData(environment).writeTo(containerData); } }
if (environment.getRegion().startsWith("aws-")) {
private void createContainerData(ContainerNodeSpec nodeSpec) { ContainerData containerData = ContainerData.createClean(environment, ContainerName.fromHostname(nodeSpec.hostname)); if (nodeSpec.nodeType.equals(NodeType.config.name())) { logger.info("Creating files needed by config server"); new ConfigServerContainerData(environment, nodeSpec.hostname).writeTo(containerData); } if (environment.getRegion().startsWith("aws-")) { logger.info("Creating files for message of the day and the bash prompt"); new MotdContainerData(nodeSpec, environment).writeTo(containerData); new PromptContainerData(environment).writeTo(containerData); } }
class CpuUsageReporter { private long containerKernelUsage = 0; private long totalContainerUsage = 0; private long totalSystemUsage = 0; private long deltaContainerKernelUsage; private long deltaContainerUsage; private long deltaSystemUsage; private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) { deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage); deltaContainerUsage = totalContainerUsage - this.totalContainerUsage; deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage; this.totalSystemUsage = totalSystemUsage; this.totalContainerUsage = totalContainerUsage; this.containerKernelUsage = containerKernelUsage; } /** * Returns the CPU usage ratio for the docker container that this NodeAgent is managing * in the time between the last two times updateCpuDeltas() was called. This is calculated * by dividing the CPU time used by the container with the CPU time used by the entire system. */ double getCpuUsageRatio() { return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage; } double getCpuKernelUsageRatio() { return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage; } }
class CpuUsageReporter { private long containerKernelUsage = 0; private long totalContainerUsage = 0; private long totalSystemUsage = 0; private long deltaContainerKernelUsage; private long deltaContainerUsage; private long deltaSystemUsage; private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) { deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage); deltaContainerUsage = totalContainerUsage - this.totalContainerUsage; deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage; this.totalSystemUsage = totalSystemUsage; this.totalContainerUsage = totalContainerUsage; this.containerKernelUsage = containerKernelUsage; } /** * Returns the CPU usage ratio for the docker container that this NodeAgent is managing * in the time between the last two times updateCpuDeltas() was called. This is calculated * by dividing the CPU time used by the container with the CPU time used by the entire system. */ double getCpuUsageRatio() { return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage; } double getCpuKernelUsageRatio() { return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage; } }
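The `CpuUsageReporter` context above derives ratios from the deltas between two consecutive samples, which is why the very first sample yields `Double.NaN`. A standalone sketch of the same idea, with made-up tick values:

```java
// Minimal re-implementation of the delta-based ratio from
// CpuUsageReporter, for illustration only; tick values are invented.
public class CpuRatioSketch {
    private static long prevSystem = 0;
    private static long prevContainer = 0;

    static double ratio(long systemTicks, long containerTicks) {
        long deltaSystem = prevSystem == 0 ? 0 : systemTicks - prevSystem;
        long deltaContainer = containerTicks - prevContainer;
        prevSystem = systemTicks;
        prevContainer = containerTicks;
        return deltaSystem == 0 ? Double.NaN : (double) deltaContainer / deltaSystem;
    }

    public static void main(String[] args) {
        System.out.println(ratio(1_000, 100)); // NaN: no previous sample yet
        System.out.println(ratio(2_000, 600)); // 0.5: container used half the CPU time
    }
}
```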
To me it would make sense to auto-add `/` here as well, as is done for the `*Page` variables.
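After the fix (visible in the updated body further below), the same "ensure leading slash" ternary appears four times; a small helper would factor it out. `startWithSlash` is a hypothetical name introduced for this sketch, not existing project code.

```java
// Hypothetical helper factoring out the repeated leading-slash ternary
// used for the *Page variables and, per this review, postLocation too.
public class SlashSketch {
    static String startWithSlash(String page) {
        return page.startsWith("/") ? page : "/" + page;
    }

    public static void main(String[] args) {
        System.out.println(startWithSlash("login.html"));  // -> /login.html
        System.out.println(startWithSlash("/error.html")); // -> /error.html
    }
}
```

Each assignment would then collapse to, e.g., `String postLocation = startWithSlash(form.postLocation);`.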
public FormAuthenticationMechanism get() { String key; if (!httpConfiguration.encryptionKey.isPresent()) { if (encryptionKey != null) { key = encryptionKey; } else { byte[] data = new byte[32]; new SecureRandom().nextBytes(data); key = encryptionKey = Base64.getEncoder().encodeToString(data); log.warn("Encryption key was not specified for persistent FORM auth, using temporary key " + key); } } else { key = httpConfiguration.encryptionKey.get(); } FormAuthConfig form = buildTimeConfig.auth.form; PersistentLoginManager loginManager = new PersistentLoginManager(key, form.cookieName, form.timeout.toMillis(), form.newCookieInterval.toMillis()); String loginPage = form.loginPage.startsWith("/") ? form.loginPage : "/" + form.loginPage; String errorPage = form.errorPage.startsWith("/") ? form.errorPage : "/" + form.errorPage; String landingPage = form.landingPage.startsWith("/") ? form.landingPage : "/" + form.landingPage; String postLocation = form.postLocation; String usernameFieldName = form.usernameFieldName; String passwordFieldName = form.passwordFieldName; String locationCookie = form.locationCookie; boolean redirectAfterLogin = form.redirectAfterLogin; return new FormAuthenticationMechanism(loginPage, postLocation, usernameFieldName, passwordFieldName, errorPage, landingPage, redirectAfterLogin, locationCookie, loginManager); }
String postLocation = form.postLocation;
public FormAuthenticationMechanism get() { String key; if (!httpConfiguration.encryptionKey.isPresent()) { if (encryptionKey != null) { key = encryptionKey; } else { byte[] data = new byte[32]; new SecureRandom().nextBytes(data); key = encryptionKey = Base64.getEncoder().encodeToString(data); log.warn("Encryption key was not specified for persistent FORM auth, using temporary key " + key); } } else { key = httpConfiguration.encryptionKey.get(); } FormAuthConfig form = buildTimeConfig.auth.form; PersistentLoginManager loginManager = new PersistentLoginManager(key, form.cookieName, form.timeout.toMillis(), form.newCookieInterval.toMillis()); String loginPage = form.loginPage.startsWith("/") ? form.loginPage : "/" + form.loginPage; String errorPage = form.errorPage.startsWith("/") ? form.errorPage : "/" + form.errorPage; String landingPage = form.landingPage.startsWith("/") ? form.landingPage : "/" + form.landingPage; String postLocation = form.postLocation.startsWith("/") ? form.postLocation : "/" + form.postLocation; String usernameParameter = form.usernameParameter; String passwordParameter = form.passwordParameter; String locationCookie = form.locationCookie; boolean redirectAfterLogin = form.redirectAfterLogin; return new FormAuthenticationMechanism(loginPage, postLocation, usernameParameter, passwordParameter, errorPage, landingPage, redirectAfterLogin, locationCookie, loginManager); }
class HttpSecurityRecorder { private static final Logger log = Logger.getLogger(HttpSecurityRecorder.class); protected static final Consumer<Throwable> NOOP_CALLBACK = new Consumer<Throwable>() { @Override public void accept(Throwable throwable) { } }; static volatile String encryptionKey; public Handler<RoutingContext> authenticationMechanismHandler(boolean proactiveAuthentication) { return new Handler<RoutingContext>() { volatile HttpAuthenticator authenticator; @Override public void handle(RoutingContext event) { if (authenticator == null) { authenticator = CDI.current().select(HttpAuthenticator.class).get(); } event.put(HttpAuthenticator.class.getName(), authenticator); event.put(QuarkusHttpUser.AUTH_FAILURE_HANDLER, new BiConsumer<RoutingContext, Throwable>() { @Override public void accept(RoutingContext routingContext, Throwable throwable) { throwable = extractRootCause(throwable); if (throwable instanceof AuthenticationFailedException) { authenticator.sendChallenge(event).subscribe().with(new Consumer<Boolean>() { @Override public void accept(Boolean aBoolean) { if (!event.response().ended()) { event.response().end(); } } }, new Consumer<Throwable>() { @Override public void accept(Throwable throwable) { event.fail(throwable); } }); } else if (throwable instanceof AuthenticationCompletionException) { event.response().setStatusCode(401); event.response().end(); } else if (throwable instanceof AuthenticationRedirectException) { AuthenticationRedirectException redirectEx = (AuthenticationRedirectException) throwable; event.response().setStatusCode(redirectEx.getCode()); event.response().headers().set(HttpHeaders.LOCATION, redirectEx.getRedirectUri()); event.response().headers().set(HttpHeaders.CACHE_CONTROL, "no-store"); event.response().headers().set("Pragma", "no-cache"); event.response().end(); } else { event.fail(throwable); } } }); Uni<SecurityIdentity> potentialUser = authenticator.attemptAuthentication(event).memoize().indefinitely(); if (proactiveAuthentication) { potentialUser .subscribe().withSubscriber(new UniSubscriber<SecurityIdentity>() { @Override public void onSubscribe(UniSubscription subscription) { } @Override public void onItem(SecurityIdentity identity) { if (event.response().ended()) { return; } if (identity == null) { Uni<SecurityIdentity> anon = authenticator.getIdentityProviderManager() .authenticate(AnonymousAuthenticationRequest.INSTANCE); anon.subscribe().withSubscriber(new UniSubscriber<SecurityIdentity>() { @Override public void onSubscribe(UniSubscription subscription) { } @Override public void onItem(SecurityIdentity item) { event.put(QuarkusHttpUser.DEFERRED_IDENTITY_KEY, anon); event.setUser(new QuarkusHttpUser(item)); event.next(); } @Override public void onFailure(Throwable failure) { BiConsumer<RoutingContext, Throwable> handler = event .get(QuarkusHttpUser.AUTH_FAILURE_HANDLER); if (handler != null) { handler.accept(event, failure); } } }); } else { event.setUser(new QuarkusHttpUser(identity)); event.put(QuarkusHttpUser.DEFERRED_IDENTITY_KEY, potentialUser); event.next(); } } @Override public void onFailure(Throwable failure) { BiConsumer<RoutingContext, Throwable> handler = event .get(QuarkusHttpUser.AUTH_FAILURE_HANDLER); if (handler != null) { handler.accept(event, failure); } } }); } else { Uni<SecurityIdentity> lazyUser = potentialUser .flatMap(new Function<SecurityIdentity, Uni<? extends SecurityIdentity>>() { @Override public Uni<? 
extends SecurityIdentity> apply(SecurityIdentity securityIdentity) { if (securityIdentity == null) { return authenticator.getIdentityProviderManager() .authenticate(AnonymousAuthenticationRequest.INSTANCE); } return Uni.createFrom().item(securityIdentity); } }).onTermination().invoke(new Functions.TriConsumer<SecurityIdentity, Throwable, Boolean>() { @Override public void accept(SecurityIdentity identity, Throwable throwable, Boolean aBoolean) { if (identity != null) { if (identity != null) { event.setUser(new QuarkusHttpUser(identity)); } } else if (throwable != null) { BiConsumer<RoutingContext, Throwable> handler = event .get(QuarkusHttpUser.AUTH_FAILURE_HANDLER); if (handler != null) { handler.accept(event, throwable); } } } }).memoize().indefinitely(); event.put(QuarkusHttpUser.DEFERRED_IDENTITY_KEY, lazyUser); event.next(); } } }; } private Throwable extractRootCause(Throwable throwable) { while ((throwable instanceof CompletionException && throwable.getCause() != null) || (throwable instanceof CompositeException)) { if (throwable instanceof CompositeException) { throwable = ((CompositeException) throwable).getCauses().get(0); } else { throwable = throwable.getCause(); } } return throwable; } public Handler<RoutingContext> permissionCheckHandler() { return new Handler<RoutingContext>() { volatile HttpAuthorizer authorizer; @Override public void handle(RoutingContext event) { if (authorizer == null) { authorizer = CDI.current().select(HttpAuthorizer.class).get(); } authorizer.checkPermission(event); } }; } public BeanContainerListener initPermissions(HttpBuildTimeConfig permissions, Map<String, Supplier<HttpSecurityPolicy>> policies) { return new BeanContainerListener() { @Override public void created(BeanContainer container) { container.instance(PathMatchingHttpSecurityPolicy.class).init(permissions, policies); } }; } public Supplier<FormAuthenticationMechanism> setupFormAuth(HttpConfiguration httpConfiguration, HttpBuildTimeConfig buildTimeConfig) { return new Supplier<FormAuthenticationMechanism>() { @Override }; } public Supplier<?> setupBasicAuth(HttpBuildTimeConfig buildTimeConfig) { return new Supplier<BasicAuthenticationMechanism>() { @Override public BasicAuthenticationMechanism get() { return new BasicAuthenticationMechanism(buildTimeConfig.auth.realm, "BASIC", buildTimeConfig.auth.form.enabled); } }; } public Supplier<?> setupMtlsClientAuth() { return new Supplier<MtlsAuthenticationMechanism>() { @Override public MtlsAuthenticationMechanism get() { return new MtlsAuthenticationMechanism(); } }; } }
class HttpSecurityRecorder { private static final Logger log = Logger.getLogger(HttpSecurityRecorder.class); protected static final Consumer<Throwable> NOOP_CALLBACK = new Consumer<Throwable>() { @Override public void accept(Throwable throwable) { } }; static volatile String encryptionKey; public Handler<RoutingContext> authenticationMechanismHandler(boolean proactiveAuthentication) { return new Handler<RoutingContext>() { volatile HttpAuthenticator authenticator; @Override public void handle(RoutingContext event) { if (authenticator == null) { authenticator = CDI.current().select(HttpAuthenticator.class).get(); } event.put(HttpAuthenticator.class.getName(), authenticator); event.put(QuarkusHttpUser.AUTH_FAILURE_HANDLER, new BiConsumer<RoutingContext, Throwable>() { @Override public void accept(RoutingContext routingContext, Throwable throwable) { throwable = extractRootCause(throwable); if (throwable instanceof AuthenticationFailedException) { authenticator.sendChallenge(event).subscribe().with(new Consumer<Boolean>() { @Override public void accept(Boolean aBoolean) { if (!event.response().ended()) { event.response().end(); } } }, new Consumer<Throwable>() { @Override public void accept(Throwable throwable) { event.fail(throwable); } }); } else if (throwable instanceof AuthenticationCompletionException) { event.response().setStatusCode(401); event.response().end(); } else if (throwable instanceof AuthenticationRedirectException) { AuthenticationRedirectException redirectEx = (AuthenticationRedirectException) throwable; event.response().setStatusCode(redirectEx.getCode()); event.response().headers().set(HttpHeaders.LOCATION, redirectEx.getRedirectUri()); event.response().headers().set(HttpHeaders.CACHE_CONTROL, "no-store"); event.response().headers().set("Pragma", "no-cache"); event.response().end(); } else { event.fail(throwable); } } }); Uni<SecurityIdentity> potentialUser = authenticator.attemptAuthentication(event).memoize().indefinitely(); if (proactiveAuthentication) { potentialUser .subscribe().withSubscriber(new UniSubscriber<SecurityIdentity>() { @Override public void onSubscribe(UniSubscription subscription) { } @Override public void onItem(SecurityIdentity identity) { if (event.response().ended()) { return; } if (identity == null) { Uni<SecurityIdentity> anon = authenticator.getIdentityProviderManager() .authenticate(AnonymousAuthenticationRequest.INSTANCE); anon.subscribe().withSubscriber(new UniSubscriber<SecurityIdentity>() { @Override public void onSubscribe(UniSubscription subscription) { } @Override public void onItem(SecurityIdentity item) { event.put(QuarkusHttpUser.DEFERRED_IDENTITY_KEY, anon); event.setUser(new QuarkusHttpUser(item)); event.next(); } @Override public void onFailure(Throwable failure) { BiConsumer<RoutingContext, Throwable> handler = event .get(QuarkusHttpUser.AUTH_FAILURE_HANDLER); if (handler != null) { handler.accept(event, failure); } } }); } else { event.setUser(new QuarkusHttpUser(identity)); event.put(QuarkusHttpUser.DEFERRED_IDENTITY_KEY, potentialUser); event.next(); } } @Override public void onFailure(Throwable failure) { BiConsumer<RoutingContext, Throwable> handler = event .get(QuarkusHttpUser.AUTH_FAILURE_HANDLER); if (handler != null) { handler.accept(event, failure); } } }); } else { Uni<SecurityIdentity> lazyUser = potentialUser .flatMap(new Function<SecurityIdentity, Uni<? extends SecurityIdentity>>() { @Override public Uni<? 
extends SecurityIdentity> apply(SecurityIdentity securityIdentity) { if (securityIdentity == null) { return authenticator.getIdentityProviderManager() .authenticate(AnonymousAuthenticationRequest.INSTANCE); } return Uni.createFrom().item(securityIdentity); } }).onTermination().invoke(new Functions.TriConsumer<SecurityIdentity, Throwable, Boolean>() { @Override public void accept(SecurityIdentity identity, Throwable throwable, Boolean aBoolean) { if (identity != null) { if (identity != null) { event.setUser(new QuarkusHttpUser(identity)); } } else if (throwable != null) { BiConsumer<RoutingContext, Throwable> handler = event .get(QuarkusHttpUser.AUTH_FAILURE_HANDLER); if (handler != null) { handler.accept(event, throwable); } } } }).memoize().indefinitely(); event.put(QuarkusHttpUser.DEFERRED_IDENTITY_KEY, lazyUser); event.next(); } } }; } private Throwable extractRootCause(Throwable throwable) { while ((throwable instanceof CompletionException && throwable.getCause() != null) || (throwable instanceof CompositeException)) { if (throwable instanceof CompositeException) { throwable = ((CompositeException) throwable).getCauses().get(0); } else { throwable = throwable.getCause(); } } return throwable; } public Handler<RoutingContext> permissionCheckHandler() { return new Handler<RoutingContext>() { volatile HttpAuthorizer authorizer; @Override public void handle(RoutingContext event) { if (authorizer == null) { authorizer = CDI.current().select(HttpAuthorizer.class).get(); } authorizer.checkPermission(event); } }; } public BeanContainerListener initPermissions(HttpBuildTimeConfig permissions, Map<String, Supplier<HttpSecurityPolicy>> policies) { return new BeanContainerListener() { @Override public void created(BeanContainer container) { container.instance(PathMatchingHttpSecurityPolicy.class).init(permissions, policies); } }; } public Supplier<FormAuthenticationMechanism> setupFormAuth(HttpConfiguration httpConfiguration, HttpBuildTimeConfig buildTimeConfig) { return new Supplier<FormAuthenticationMechanism>() { @Override }; } public Supplier<?> setupBasicAuth(HttpBuildTimeConfig buildTimeConfig) { return new Supplier<BasicAuthenticationMechanism>() { @Override public BasicAuthenticationMechanism get() { return new BasicAuthenticationMechanism(buildTimeConfig.auth.realm, "BASIC", buildTimeConfig.auth.form.enabled); } }; } public Supplier<?> setupMtlsClientAuth() { return new Supplier<MtlsAuthenticationMechanism>() { @Override public MtlsAuthenticationMechanism get() { return new MtlsAuthenticationMechanism(); } }; } }
Yes, its catalog, db, and table names are case-sensitive.
public boolean equals(Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } PaimonTable that = (PaimonTable) o; return catalogName.equals(that.catalogName) && databaseName.equals(that.databaseName) && tableName.equals(that.tableName) && createTime == that.createTime; }
return catalogName.equals(that.catalogName) &&
public boolean equals(Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } PaimonTable that = (PaimonTable) o; return catalogName.equals(that.catalogName) && databaseName.equals(that.databaseName) && tableName.equals(that.tableName) && createTime == that.createTime; }
class PaimonTable extends Table { private final String catalogName; private final String databaseName; private final String tableName; private final AbstractFileStoreTable paimonNativeTable; private final List<String> partColumnNames; private final List<String> paimonFieldNames; private long latestSnapshotId; public PaimonTable(String catalogName, String dbName, String tblName, List<Column> schema, org.apache.paimon.table.Table paimonNativeTable, long createTime) { super(CONNECTOR_ID_GENERATOR.getNextId().asInt(), tblName, TableType.PAIMON, schema); this.catalogName = catalogName; this.databaseName = dbName; this.tableName = tblName; this.paimonNativeTable = (AbstractFileStoreTable) paimonNativeTable; this.partColumnNames = paimonNativeTable.partitionKeys(); this.paimonFieldNames = paimonNativeTable.rowType().getFields().stream() .map(DataField::name) .collect(Collectors.toList()); this.createTime = createTime; } @Override public String getCatalogName() { return catalogName; } public String getDbName() { return databaseName; } public String getTableName() { return tableName; } public AbstractFileStoreTable getNativeTable() { return paimonNativeTable; } @Override public String getUUID() { return String.join(".", catalogName, databaseName, tableName, Long.toString(createTime)); } @Override public String getTableLocation() { return paimonNativeTable.location().toString(); } @Override public List<String> getPartitionColumnNames() { return partColumnNames; } @Override public List<Column> getPartitionColumns() { List<Column> partitionColumns = new ArrayList<>(); if (!partColumnNames.isEmpty()) { partitionColumns = partColumnNames.stream().map(this::getColumn) .collect(Collectors.toList()); } return partitionColumns; } public List<String> getFieldNames() { return paimonFieldNames; } @Override public boolean isUnPartitioned() { return partColumnNames.isEmpty(); } @Override public boolean isSupported() { return true; } @Override public TTableDescriptor toThrift(List<DescriptorTable.ReferencedPartitionInfo> partitions) { TPaimonTable tPaimonTable = new TPaimonTable(); String encodedTable = PaimonScanNode.encodeObjectToString(paimonNativeTable); tPaimonTable.setPaimon_native_table(encodedTable); TTableDescriptor tTableDescriptor = new TTableDescriptor(id, TTableType.PAIMON_TABLE, fullSchema.size(), 0, tableName, databaseName); tTableDescriptor.setPaimonTable(tPaimonTable); return tTableDescriptor; } @Override @Override public int hashCode() { return Objects.hash(catalogName, databaseName, tableName, createTime); } public long getLatestSnapshotId() { return latestSnapshotId; } public void setLatestSnapshotId(long latestSnapshotId) { this.latestSnapshotId = latestSnapshotId; } }
class PaimonTable extends Table { private final String catalogName; private final String databaseName; private final String tableName; private final AbstractFileStoreTable paimonNativeTable; private final List<String> partColumnNames; private final List<String> paimonFieldNames; private long latestSnapshotId; public PaimonTable(String catalogName, String dbName, String tblName, List<Column> schema, org.apache.paimon.table.Table paimonNativeTable, long createTime) { super(CONNECTOR_ID_GENERATOR.getNextId().asInt(), tblName, TableType.PAIMON, schema); this.catalogName = catalogName; this.databaseName = dbName; this.tableName = tblName; this.paimonNativeTable = (AbstractFileStoreTable) paimonNativeTable; this.partColumnNames = paimonNativeTable.partitionKeys(); this.paimonFieldNames = paimonNativeTable.rowType().getFields().stream() .map(DataField::name) .collect(Collectors.toList()); this.createTime = createTime; } @Override public String getCatalogName() { return catalogName; } public String getDbName() { return databaseName; } public String getTableName() { return tableName; } public AbstractFileStoreTable getNativeTable() { return paimonNativeTable; } @Override public String getUUID() { return String.join(".", catalogName, databaseName, tableName, Long.toString(createTime)); } @Override public String getTableLocation() { return paimonNativeTable.location().toString(); } @Override public List<String> getPartitionColumnNames() { return partColumnNames; } @Override public List<Column> getPartitionColumns() { List<Column> partitionColumns = new ArrayList<>(); if (!partColumnNames.isEmpty()) { partitionColumns = partColumnNames.stream().map(this::getColumn) .collect(Collectors.toList()); } return partitionColumns; } public List<String> getFieldNames() { return paimonFieldNames; } @Override public boolean isUnPartitioned() { return partColumnNames.isEmpty(); } @Override public boolean isSupported() { return true; } @Override public TTableDescriptor toThrift(List<DescriptorTable.ReferencedPartitionInfo> partitions) { TPaimonTable tPaimonTable = new TPaimonTable(); String encodedTable = PaimonScanNode.encodeObjectToString(paimonNativeTable); tPaimonTable.setPaimon_native_table(encodedTable); TTableDescriptor tTableDescriptor = new TTableDescriptor(id, TTableType.PAIMON_TABLE, fullSchema.size(), 0, tableName, databaseName); tTableDescriptor.setPaimonTable(tPaimonTable); return tTableDescriptor; } @Override @Override public int hashCode() { return Objects.hash(catalogName, databaseName, tableName, createTime); } public long getLatestSnapshotId() { return latestSnapshotId; } public void setLatestSnapshotId(long latestSnapshotId) { this.latestSnapshotId = latestSnapshotId; } }
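Since `equals()` above compares the identity fields with `String.equals()`, name case is significant, as the reply confirms. A tiny illustration (hypothetical table names):

```java
final class CaseSensitivityDemo {
    public static void main(String[] args) {
        // Two hypothetical table names differing only in case: String.equals()
        // treats them as distinct, so the PaimonTable keys above differ too.
        String a = "orders";
        String b = "Orders";
        System.out.println(a.equals(b));           // false
        System.out.println(a.equalsIgnoreCase(b)); // true (what a case-insensitive catalog would see)
    }
}
```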
Not sure if I understand your question. Before applying any specs, the table has a schema as described by ``` contextResolvedTable.getResolvedSchema() .toSourceRowDataType() .getLogicalType() ```. Only once the `ReadingMetadataSpec` is applied does the type change to `spec.getProducedType().get()`. In this particular case of reading metadata in a `TableSourceScan`, it is equal to `DynamicSourceUtils.createProducedType(contextResolvedTable.getResolvedSchema(), tableSource);`
private DynamicTableSource getTableSource(FlinkContext context, FlinkTypeFactory typeFactory) { if (tableSource == null) { DynamicTableSourceFactory factory = context.getModuleManager() .getFactory(Module::getTableSourceFactory) .orElse(null); if (factory == null) { Catalog catalog = context.getCatalogManager() .getCatalog(contextResolvedTable.getIdentifier().getCatalogName()) .orElse(null); factory = FactoryUtil.getDynamicTableFactory(DynamicTableSourceFactory.class, catalog) .orElse(null); } tableSource = FactoryUtil.createDynamicTableSource( factory, contextResolvedTable.getIdentifier(), contextResolvedTable.getResolvedTable(), loadOptionsFromCatalogTable(contextResolvedTable, context), context.getTableConfig(), context.getClassLoader(), contextResolvedTable.isTemporary()); if (sourceAbilities != null) { RowType newProducedType = (RowType) contextResolvedTable .getResolvedSchema() .toSourceRowDataType() .getLogicalType(); for (SourceAbilitySpec spec : sourceAbilities) { SourceAbilityContext sourceAbilityContext = new SourceAbilityContext(context, typeFactory, newProducedType); spec.apply(tableSource, sourceAbilityContext); if (spec.getProducedType().isPresent()) { newProducedType = spec.getProducedType().get(); } } } } return tableSource; }
.getLogicalType();
private DynamicTableSource getTableSource(FlinkContext context, FlinkTypeFactory typeFactory) { if (tableSource == null) { DynamicTableSourceFactory factory = context.getModuleManager() .getFactory(Module::getTableSourceFactory) .orElse(null); if (factory == null) { Catalog catalog = context.getCatalogManager() .getCatalog(contextResolvedTable.getIdentifier().getCatalogName()) .orElse(null); factory = FactoryUtil.getDynamicTableFactory(DynamicTableSourceFactory.class, catalog) .orElse(null); } tableSource = FactoryUtil.createDynamicTableSource( factory, contextResolvedTable.getIdentifier(), contextResolvedTable.getResolvedTable(), loadOptionsFromCatalogTable(contextResolvedTable, context), context.getTableConfig(), context.getClassLoader(), contextResolvedTable.isTemporary()); if (sourceAbilities != null) { RowType newProducedType = (RowType) contextResolvedTable .getResolvedSchema() .toPhysicalRowDataType() .getLogicalType(); for (SourceAbilitySpec spec : sourceAbilities) { SourceAbilityContext sourceAbilityContext = new SourceAbilityContext(context, typeFactory, newProducedType); spec.apply(tableSource, sourceAbilityContext); if (spec.getProducedType().isPresent()) { newProducedType = spec.getProducedType().get(); } } } } return tableSource; }
class DynamicTableSourceSpec extends DynamicTableSpecBase { public static final String FIELD_NAME_CATALOG_TABLE = "table"; public static final String FIELD_NAME_SOURCE_ABILITIES = "abilities"; private final ContextResolvedTable contextResolvedTable; private final @Nullable List<SourceAbilitySpec> sourceAbilities; private DynamicTableSource tableSource; @JsonCreator public DynamicTableSourceSpec( @JsonProperty(FIELD_NAME_CATALOG_TABLE) ContextResolvedTable contextResolvedTable, @Nullable @JsonProperty(FIELD_NAME_SOURCE_ABILITIES) List<SourceAbilitySpec> sourceAbilities) { this.contextResolvedTable = contextResolvedTable; this.sourceAbilities = sourceAbilities; } public ScanTableSource getScanTableSource(FlinkContext context, FlinkTypeFactory typeFactory) { DynamicTableSource tableSource = getTableSource(context, typeFactory); if (tableSource instanceof ScanTableSource) { return (ScanTableSource) tableSource; } else { throw new TableException( String.format( "%s is not a ScanTableSource.\nPlease check it.", tableSource.getClass().getName())); } } public LookupTableSource getLookupTableSource( FlinkContext context, FlinkTypeFactory typeFactory) { DynamicTableSource tableSource = getTableSource(context, typeFactory); if (tableSource instanceof LookupTableSource) { return (LookupTableSource) tableSource; } else { throw new TableException( String.format( "%s is not a LookupTableSource.\nPlease check it.", tableSource.getClass().getName())); } } @JsonGetter(FIELD_NAME_CATALOG_TABLE) public ContextResolvedTable getContextResolvedTable() { return contextResolvedTable; } @JsonGetter(FIELD_NAME_SOURCE_ABILITIES) @JsonInclude(JsonInclude.Include.NON_EMPTY) @Nullable public List<SourceAbilitySpec> getSourceAbilities() { return sourceAbilities; } public void setTableSource(DynamicTableSource tableSource) { this.tableSource = tableSource; } @Override public boolean equals(Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } DynamicTableSourceSpec that = (DynamicTableSourceSpec) o; return Objects.equals(contextResolvedTable, that.contextResolvedTable) && Objects.equals(sourceAbilities, that.sourceAbilities) && Objects.equals(tableSource, that.tableSource); } @Override public int hashCode() { return Objects.hash(contextResolvedTable, sourceAbilities, tableSource); } @Override public String toString() { return "DynamicTableSourceSpec{" + "contextResolvedTable=" + contextResolvedTable + ", sourceAbilities=" + sourceAbilities + ", tableSource=" + tableSource + '}'; } }
class DynamicTableSourceSpec extends DynamicTableSpecBase { public static final String FIELD_NAME_CATALOG_TABLE = "table"; public static final String FIELD_NAME_SOURCE_ABILITIES = "abilities"; private final ContextResolvedTable contextResolvedTable; private final @Nullable List<SourceAbilitySpec> sourceAbilities; private DynamicTableSource tableSource; @JsonCreator public DynamicTableSourceSpec( @JsonProperty(FIELD_NAME_CATALOG_TABLE) ContextResolvedTable contextResolvedTable, @Nullable @JsonProperty(FIELD_NAME_SOURCE_ABILITIES) List<SourceAbilitySpec> sourceAbilities) { this.contextResolvedTable = contextResolvedTable; this.sourceAbilities = sourceAbilities; } public ScanTableSource getScanTableSource(FlinkContext context, FlinkTypeFactory typeFactory) { DynamicTableSource tableSource = getTableSource(context, typeFactory); if (tableSource instanceof ScanTableSource) { return (ScanTableSource) tableSource; } else { throw new TableException( String.format( "%s is not a ScanTableSource.\nPlease check it.", tableSource.getClass().getName())); } } public LookupTableSource getLookupTableSource( FlinkContext context, FlinkTypeFactory typeFactory) { DynamicTableSource tableSource = getTableSource(context, typeFactory); if (tableSource instanceof LookupTableSource) { return (LookupTableSource) tableSource; } else { throw new TableException( String.format( "%s is not a LookupTableSource.\nPlease check it.", tableSource.getClass().getName())); } } @JsonGetter(FIELD_NAME_CATALOG_TABLE) public ContextResolvedTable getContextResolvedTable() { return contextResolvedTable; } @JsonGetter(FIELD_NAME_SOURCE_ABILITIES) @JsonInclude(JsonInclude.Include.NON_EMPTY) @Nullable public List<SourceAbilitySpec> getSourceAbilities() { return sourceAbilities; } public void setTableSource(DynamicTableSource tableSource) { this.tableSource = tableSource; } @Override public boolean equals(Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } DynamicTableSourceSpec that = (DynamicTableSourceSpec) o; return Objects.equals(contextResolvedTable, that.contextResolvedTable) && Objects.equals(sourceAbilities, that.sourceAbilities) && Objects.equals(tableSource, that.tableSource); } @Override public int hashCode() { return Objects.hash(contextResolvedTable, sourceAbilities, tableSource); } @Override public String toString() { return "DynamicTableSourceSpec{" + "contextResolvedTable=" + contextResolvedTable + ", sourceAbilities=" + sourceAbilities + ", tableSource=" + tableSource + '}'; } }
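As the reply explains, the produced row type starts from the table schema and only changes when a spec (here `ReadingMetadataSpec`) reports one via `getProducedType()`. A condensed sketch of that fold from `getTableSource` above (Flink planner package names assumed; the `SourceAbilityContext` constructor and `getProducedType()` usage are taken from the method itself):

```java
import java.util.List;

import org.apache.flink.table.connector.source.DynamicTableSource;
import org.apache.flink.table.planner.calcite.FlinkContext;
import org.apache.flink.table.planner.calcite.FlinkTypeFactory;
import org.apache.flink.table.planner.plan.abilities.source.SourceAbilityContext;
import org.apache.flink.table.planner.plan.abilities.source.SourceAbilitySpec;
import org.apache.flink.table.types.logical.RowType;

final class AbilityFold {
    // Each spec sees the row type produced so far; a spec that declares a
    // produced type (e.g. ReadingMetadataSpec) replaces it for later specs.
    static RowType applyAll(DynamicTableSource tableSource, List<SourceAbilitySpec> specs,
            FlinkContext context, FlinkTypeFactory typeFactory, RowType initialType) {
        RowType producedType = initialType;
        for (SourceAbilitySpec spec : specs) {
            spec.apply(tableSource, new SourceAbilityContext(context, typeFactory, producedType));
            producedType = spec.getProducedType().orElse(producedType);
        }
        return producedType;
    }
}
```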
In my understanding, if we set `azure.activedirectory.tenant-id=common`, then a multi-tenant application will work.
private ClientRegistration azureClientRegistration() { String tenantId = aadAuthenticationProperties.getTenantId().trim(); Assert.hasText(tenantId, "azure.activedirectory.tenant-id should have text."); Assert.doesNotContain(tenantId, " ", "azure.activedirectory.tenant-id should not contain ' '."); Assert.doesNotContain(tenantId, "/", "azure.activedirectory.tenant-id should not contain '/'."); return ClientRegistration.withRegistrationId("azure") .clientId(aadAuthenticationProperties.getClientId()) .clientSecret(aadAuthenticationProperties.getClientSecret()) .clientAuthenticationMethod(ClientAuthenticationMethod.POST) .authorizationGrantType(AuthorizationGrantType.AUTHORIZATION_CODE) .redirectUriTemplate("{baseUrl}/login/oauth2/code/{registrationId}") .scope("openid", "https: .authorizationUri( String.format( "https: tenantId ) ) .tokenUri( String.format( "https: tenantId ) ) .userInfoUri("https: .userNameAttributeName(AADAccessTokenClaim.NAME) .jwkSetUri( String.format( "https: tenantId ) ) .clientName("Azure") .build(); }
return ClientRegistration.withRegistrationId("azure")
private ClientRegistration azureClientRegistration() { String tenantId = aadAuthenticationProperties.getTenantId().trim(); Assert.hasText(tenantId, "azure.activedirectory.tenant-id should have text."); Assert.doesNotContain(tenantId, " ", "azure.activedirectory.tenant-id should not contain ' '."); Assert.doesNotContain(tenantId, "/", "azure.activedirectory.tenant-id should not contain '/'."); return ClientRegistration.withRegistrationId("azure") .clientId(aadAuthenticationProperties.getClientId()) .clientSecret(aadAuthenticationProperties.getClientSecret()) .clientAuthenticationMethod(ClientAuthenticationMethod.POST) .authorizationGrantType(AuthorizationGrantType.AUTHORIZATION_CODE) .redirectUriTemplate("{baseUrl}/login/oauth2/code/{registrationId}") .scope(aadAuthenticationProperties.getScope()) .authorizationUri( String.format( "https: tenantId ) ) .tokenUri( String.format( "https: tenantId ) ) .userInfoUri("https: .userNameAttributeName(AADAccessTokenClaim.NAME) .jwkSetUri( String.format( "https: tenantId ) ) .clientName("Azure") .build(); }
class AADOAuth2AutoConfiguration { private final AADAuthenticationProperties aadAuthenticationProperties; private final ServiceEndpointsProperties serviceEndpointsProperties; public AADOAuth2AutoConfiguration(AADAuthenticationProperties aadAuthProperties, ServiceEndpointsProperties serviceEndpointsProperties) { this.aadAuthenticationProperties = aadAuthProperties; this.serviceEndpointsProperties = serviceEndpointsProperties; } @Bean @ConditionalOnProperty(prefix = "azure.activedirectory.user-group", value = "allowed-groups") public OAuth2UserService<OidcUserRequest, OidcUser> oidcUserService() { return new AADOAuth2UserService(aadAuthenticationProperties, serviceEndpointsProperties); } @Bean public ClientRegistrationRepository clientRegistrationRepository() { return new InMemoryClientRegistrationRepository(azureClientRegistration()); } @PostConstruct private void sendTelemetry() { if (aadAuthenticationProperties.isAllowTelemetry()) { final Map<String, String> events = new HashMap<>(); final TelemetrySender sender = new TelemetrySender(); events.put(SERVICE_NAME, getClassPackageSimpleName(AADOAuth2AutoConfiguration.class)); sender.send(ClassUtils.getUserClass(getClass()).getSimpleName(), events); } } }
class AADOAuth2AutoConfiguration { private final AADAuthenticationProperties aadAuthenticationProperties; private final ServiceEndpointsProperties serviceEndpointsProperties; public AADOAuth2AutoConfiguration(AADAuthenticationProperties aadAuthProperties, ServiceEndpointsProperties serviceEndpointsProperties) { this.aadAuthenticationProperties = aadAuthProperties; this.serviceEndpointsProperties = serviceEndpointsProperties; } @Bean @ConditionalOnProperty(prefix = "azure.activedirectory.user-group", value = "allowed-groups") public OAuth2UserService<OidcUserRequest, OidcUser> oidcUserService() { return new AADOAuth2UserService(aadAuthenticationProperties, serviceEndpointsProperties); } @Bean public ClientRegistrationRepository clientRegistrationRepository() { return new InMemoryClientRegistrationRepository(azureClientRegistration()); } @PostConstruct private void sendTelemetry() { if (aadAuthenticationProperties.isAllowTelemetry()) { final Map<String, String> events = new HashMap<>(); final TelemetrySender sender = new TelemetrySender(); events.put(SERVICE_NAME, getClassPackageSimpleName(AADOAuth2AutoConfiguration.class)); sender.send(ClassUtils.getUserClass(getClass()).getSimpleName(), events); } } }
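The reply above refers to Azure AD's multi-tenant `common` endpoint; note that the value `common` passes the three `Assert` checks in `azureClientRegistration` (non-blank, no spaces, no slashes). A minimal sketch of just that validation (the `validate` method name is hypothetical; Spring's `Assert` is used exactly as above):

```java
import org.springframework.util.Assert;

final class TenantIdCheck {
    // Mirrors the validation in azureClientRegistration above; "common"
    // (the multi-tenant endpoint) passes all three checks.
    static String validate(String tenantId) {
        tenantId = tenantId.trim();
        Assert.hasText(tenantId, "azure.activedirectory.tenant-id should have text.");
        Assert.doesNotContain(tenantId, " ", "azure.activedirectory.tenant-id should not contain ' '.");
        Assert.doesNotContain(tenantId, "/", "azure.activedirectory.tenant-id should not contain '/'.");
        return tenantId;
    }

    public static void main(String[] args) {
        System.out.println(validate("common")); // OK: enables the multi-tenant endpoint
    }
}
```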
You are right; I will add a `checkArgument`.
void requestExclusiveBuffers(int numExclusiveBuffers) throws IOException { if (numExclusiveBuffers <= 0) { return; } Collection<MemorySegment> segments = globalPool.requestMemorySegments(numExclusiveBuffers); synchronized (bufferQueue) { for (MemorySegment segment : segments) { bufferQueue.addExclusiveBuffer( new NetworkBuffer(segment, this), numRequiredBuffers); } } }
if (numExclusiveBuffers <= 0) {
void requestExclusiveBuffers(int numExclusiveBuffers) throws IOException { checkArgument(numExclusiveBuffers >= 0, "Num exclusive buffers must be non-negative."); if (numExclusiveBuffers == 0) { return; } Collection<MemorySegment> segments = globalPool.requestMemorySegments(numExclusiveBuffers); synchronized (bufferQueue) { checkState( unsynchronizedGetFloatingBuffersAvailable() == 0, "Bug in buffer allocation logic: floating buffer is allocated before exclusive buffers are initialized."); for (MemorySegment segment : segments) { bufferQueue.addExclusiveBuffer( new NetworkBuffer(segment, this), numRequiredBuffers); } } }
class BufferManager implements BufferListener, BufferRecycler { /** The available buffer queue wraps both exclusive and requested floating buffers. */ private final AvailableBufferQueue bufferQueue = new AvailableBufferQueue(); /** The buffer provider for requesting exclusive buffers. */ private final MemorySegmentProvider globalPool; /** The input channel to own this buffer manager. */ private final InputChannel inputChannel; /** * The tag indicates whether it is waiting for additional floating buffers from the buffer pool. */ @GuardedBy("bufferQueue") private boolean isWaitingForFloatingBuffers; /** The total number of required buffers for the respective input channel. */ @GuardedBy("bufferQueue") private int numRequiredBuffers; public BufferManager( MemorySegmentProvider globalPool, InputChannel inputChannel, int numRequiredBuffers) { this.globalPool = checkNotNull(globalPool); this.inputChannel = checkNotNull(inputChannel); checkArgument(numRequiredBuffers >= 0); this.numRequiredBuffers = numRequiredBuffers; } @Nullable Buffer requestBuffer(int initialCredit) { synchronized (bufferQueue) { if (initialCredit == 0) { --numRequiredBuffers; } return bufferQueue.takeBuffer(); } } Buffer requestBufferBlocking() throws InterruptedException { synchronized (bufferQueue) { Buffer buffer; while ((buffer = bufferQueue.takeBuffer()) == null) { if (inputChannel.isReleased()) { throw new CancelTaskException( "Input channel [" + inputChannel.channelInfo + "] has already been released."); } if (!isWaitingForFloatingBuffers) { BufferPool bufferPool = inputChannel.inputGate.getBufferPool(); buffer = bufferPool.requestBuffer(); if (buffer == null && shouldContinueRequest(bufferPool)) { continue; } } if (buffer != null) { return buffer; } bufferQueue.wait(); } return buffer; } } private boolean shouldContinueRequest(BufferPool bufferPool) { if (bufferPool.addBufferListener(this)) { isWaitingForFloatingBuffers = true; numRequiredBuffers = 1; return false; } else if (bufferPool.isDestroyed()) { throw new CancelTaskException("Local buffer pool has already been released."); } else { return true; } } /** Requests exclusive buffers from the provider. */ /** * Requests floating buffers from the buffer pool based on the given required amount, and * returns the actual requested amount. If the required amount is not fully satisfied, it will * register as a listener. */ int requestFloatingBuffers(int numRequired) { int numRequestedBuffers = 0; synchronized (bufferQueue) { if (inputChannel.isReleased()) { return numRequestedBuffers; } numRequiredBuffers = numRequired; while (bufferQueue.getAvailableBufferSize() < numRequiredBuffers && !isWaitingForFloatingBuffers) { BufferPool bufferPool = inputChannel.inputGate.getBufferPool(); Buffer buffer = bufferPool.requestBuffer(); if (buffer != null) { bufferQueue.addFloatingBuffer(buffer); numRequestedBuffers++; } else if (bufferPool.addBufferListener(this)) { isWaitingForFloatingBuffers = true; break; } } } return numRequestedBuffers; } /** * Exclusive buffer is recycled to this channel manager directly and it may trigger return extra * floating buffer based on <tt>numRequiredBuffers</tt>. * * @param segment The exclusive segment of this channel. 
*/ @Override public void recycle(MemorySegment segment) { int numAddedBuffers = 0; synchronized (bufferQueue) { try { if (inputChannel.isReleased()) { globalPool.recycleMemorySegments(Collections.singletonList(segment)); } else { numAddedBuffers = bufferQueue.addExclusiveBuffer( new NetworkBuffer(segment, this), numRequiredBuffers); } } catch (Throwable t) { ExceptionUtils.rethrow(t); } finally { bufferQueue.notifyAll(); } } try { inputChannel.notifyBufferAvailable(numAddedBuffers); } catch (Throwable t) { ExceptionUtils.rethrow(t); } } void releaseFloatingBuffers() { Queue<Buffer> buffers; synchronized (bufferQueue) { numRequiredBuffers = 0; buffers = bufferQueue.clearFloatingBuffers(); } while (!buffers.isEmpty()) { buffers.poll().recycleBuffer(); } } /** Recycles all the exclusive and floating buffers from the given buffer queue. */ void releaseAllBuffers(ArrayDeque<Buffer> buffers) throws IOException { final List<MemorySegment> exclusiveRecyclingSegments = new ArrayList<>(); Exception err = null; Buffer buffer; while ((buffer = buffers.poll()) != null) { try { if (buffer.getRecycler() == BufferManager.this) { exclusiveRecyclingSegments.add(buffer.getMemorySegment()); } else { buffer.recycleBuffer(); } } catch (Exception e) { err = firstOrSuppressed(e, err); } } try { synchronized (bufferQueue) { bufferQueue.releaseAll(exclusiveRecyclingSegments); bufferQueue.notifyAll(); } } catch (Exception e) { err = firstOrSuppressed(e, err); } try { if (exclusiveRecyclingSegments.size() > 0) { globalPool.recycleMemorySegments(exclusiveRecyclingSegments); } } catch (Exception e) { err = firstOrSuppressed(e, err); } if (err != null) { throw err instanceof IOException ? (IOException) err : new IOException(err); } } /** * The buffer pool notifies this listener of an available floating buffer. If the listener is * released or currently does not need extra buffers, the buffer should be returned to the * buffer pool. Otherwise, the buffer will be added into the <tt>bufferQueue</tt>. * * @param buffer Buffer that becomes available in buffer pool. * @return NotificationResult indicates whether this channel accepts the buffer and is waiting * for more floating buffers. 
*/ @Override public BufferListener.NotificationResult notifyBufferAvailable(Buffer buffer) { BufferListener.NotificationResult notificationResult = BufferListener.NotificationResult.BUFFER_NOT_USED; if (inputChannel.isReleased()) { return notificationResult; } try { synchronized (bufferQueue) { checkState( isWaitingForFloatingBuffers, "This channel should be waiting for floating buffers."); if (inputChannel.isReleased() || bufferQueue.getAvailableBufferSize() >= numRequiredBuffers) { isWaitingForFloatingBuffers = false; return notificationResult; } bufferQueue.addFloatingBuffer(buffer); bufferQueue.notifyAll(); if (bufferQueue.getAvailableBufferSize() == numRequiredBuffers) { isWaitingForFloatingBuffers = false; notificationResult = BufferListener.NotificationResult.BUFFER_USED_NO_NEED_MORE; } else { notificationResult = BufferListener.NotificationResult.BUFFER_USED_NEED_MORE; } } inputChannel.notifyBufferAvailable(1); } catch (Throwable t) { inputChannel.setError(t); } return notificationResult; } @Override public void notifyBufferDestroyed() { } @VisibleForTesting int unsynchronizedGetNumberOfRequiredBuffers() { return numRequiredBuffers; } @VisibleForTesting boolean unsynchronizedIsWaitingForFloatingBuffers() { return isWaitingForFloatingBuffers; } @VisibleForTesting int getNumberOfAvailableBuffers() { synchronized (bufferQueue) { return bufferQueue.getAvailableBufferSize(); } } int unsynchronizedGetAvailableExclusiveBuffers() { return bufferQueue.exclusiveBuffers.size(); } int unsynchronizedGetFloatingBuffersAvailable() { return bufferQueue.floatingBuffers.size(); } /** * Manages the exclusive and floating buffers of this channel, and handles the internal buffer * related logic. */ static final class AvailableBufferQueue { /** The current available floating buffers from the fixed buffer pool. */ final ArrayDeque<Buffer> floatingBuffers; /** The current available exclusive buffers from the global buffer pool. */ final ArrayDeque<Buffer> exclusiveBuffers; AvailableBufferQueue() { this.exclusiveBuffers = new ArrayDeque<>(); this.floatingBuffers = new ArrayDeque<>(); } /** * Adds an exclusive buffer (back) into the queue and recycles one floating buffer if the * number of available buffers in queue is more than the required amount. * * @param buffer The exclusive buffer to add * @param numRequiredBuffers The number of required buffers * @return How many buffers were added to the queue */ int addExclusiveBuffer(Buffer buffer, int numRequiredBuffers) { exclusiveBuffers.add(buffer); if (getAvailableBufferSize() > numRequiredBuffers) { Buffer floatingBuffer = floatingBuffers.poll(); if (floatingBuffer != null) { floatingBuffer.recycleBuffer(); return 0; } } return 1; } void addFloatingBuffer(Buffer buffer) { floatingBuffers.add(buffer); } /** * Takes the floating buffer first in order to make full use of floating buffers reasonably. * * @return An available floating or exclusive buffer, may be null if the channel is * released. */ @Nullable Buffer takeBuffer() { if (floatingBuffers.size() > 0) { return floatingBuffers.poll(); } else { return exclusiveBuffers.poll(); } } /** * The floating buffer is recycled to local buffer pool directly, and the exclusive buffer * will be gathered to return to global buffer pool later. * * @param exclusiveSegments The list that we will add exclusive segments into. 
*/ void releaseAll(List<MemorySegment> exclusiveSegments) { Buffer buffer; while ((buffer = floatingBuffers.poll()) != null) { buffer.recycleBuffer(); } while ((buffer = exclusiveBuffers.poll()) != null) { exclusiveSegments.add(buffer.getMemorySegment()); } } Queue<Buffer> clearFloatingBuffers() { Queue<Buffer> buffers = new ArrayDeque<>(floatingBuffers); floatingBuffers.clear(); return buffers; } int getAvailableBufferSize() { return floatingBuffers.size() + exclusiveBuffers.size(); } } }
class BufferManager implements BufferListener, BufferRecycler { /** The available buffer queue wraps both exclusive and requested floating buffers. */ private final AvailableBufferQueue bufferQueue = new AvailableBufferQueue(); /** The buffer provider for requesting exclusive buffers. */ private final MemorySegmentProvider globalPool; /** The input channel to own this buffer manager. */ private final InputChannel inputChannel; /** * The tag indicates whether it is waiting for additional floating buffers from the buffer pool. */ @GuardedBy("bufferQueue") private boolean isWaitingForFloatingBuffers; /** The total number of required buffers for the respective input channel. */ @GuardedBy("bufferQueue") private int numRequiredBuffers; public BufferManager( MemorySegmentProvider globalPool, InputChannel inputChannel, int numRequiredBuffers) { this.globalPool = checkNotNull(globalPool); this.inputChannel = checkNotNull(inputChannel); checkArgument(numRequiredBuffers >= 0); this.numRequiredBuffers = numRequiredBuffers; } @Nullable Buffer requestBuffer() { synchronized (bufferQueue) { --numRequiredBuffers; return bufferQueue.takeBuffer(); } } Buffer requestBufferBlocking() throws InterruptedException { synchronized (bufferQueue) { Buffer buffer; while ((buffer = bufferQueue.takeBuffer()) == null) { if (inputChannel.isReleased()) { throw new CancelTaskException( "Input channel [" + inputChannel.channelInfo + "] has already been released."); } if (!isWaitingForFloatingBuffers) { BufferPool bufferPool = inputChannel.inputGate.getBufferPool(); buffer = bufferPool.requestBuffer(); if (buffer == null && shouldContinueRequest(bufferPool)) { continue; } } if (buffer != null) { return buffer; } bufferQueue.wait(); } return buffer; } } private boolean shouldContinueRequest(BufferPool bufferPool) { if (bufferPool.addBufferListener(this)) { isWaitingForFloatingBuffers = true; numRequiredBuffers = 1; return false; } else if (bufferPool.isDestroyed()) { throw new CancelTaskException("Local buffer pool has already been released."); } else { return true; } } /** Requests exclusive buffers from the provider. */ /** * Requests floating buffers from the buffer pool based on the given required amount, and * returns the actual requested amount. If the required amount is not fully satisfied, it will * register as a listener. */ int requestFloatingBuffers(int numRequired) { int numRequestedBuffers = 0; synchronized (bufferQueue) { if (inputChannel.isReleased()) { return numRequestedBuffers; } numRequiredBuffers = numRequired; while (bufferQueue.getAvailableBufferSize() < numRequiredBuffers && !isWaitingForFloatingBuffers) { BufferPool bufferPool = inputChannel.inputGate.getBufferPool(); Buffer buffer = bufferPool.requestBuffer(); if (buffer != null) { bufferQueue.addFloatingBuffer(buffer); numRequestedBuffers++; } else if (bufferPool.addBufferListener(this)) { isWaitingForFloatingBuffers = true; break; } } } return numRequestedBuffers; } /** * Exclusive buffer is recycled to this channel manager directly and it may trigger return extra * floating buffer based on <tt>numRequiredBuffers</tt>. * * @param segment The exclusive segment of this channel. 
*/ @Override public void recycle(MemorySegment segment) { @Nullable Buffer releasedFloatingBuffer = null; synchronized (bufferQueue) { try { if (inputChannel.isReleased()) { globalPool.recycleMemorySegments(Collections.singletonList(segment)); return; } else { releasedFloatingBuffer = bufferQueue.addExclusiveBuffer( new NetworkBuffer(segment, this), numRequiredBuffers); } } catch (Throwable t) { ExceptionUtils.rethrow(t); } finally { bufferQueue.notifyAll(); } } if (releasedFloatingBuffer != null) { releasedFloatingBuffer.recycleBuffer(); } else { try { inputChannel.notifyBufferAvailable(1); } catch (Throwable t) { ExceptionUtils.rethrow(t); } } } void releaseFloatingBuffers() { Queue<Buffer> buffers; synchronized (bufferQueue) { numRequiredBuffers = 0; buffers = bufferQueue.clearFloatingBuffers(); } while (!buffers.isEmpty()) { buffers.poll().recycleBuffer(); } } /** Recycles all the exclusive and floating buffers from the given buffer queue. */ void releaseAllBuffers(ArrayDeque<Buffer> buffers) throws IOException { final List<MemorySegment> exclusiveRecyclingSegments = new ArrayList<>(); Exception err = null; Buffer buffer; while ((buffer = buffers.poll()) != null) { try { if (buffer.getRecycler() == BufferManager.this) { exclusiveRecyclingSegments.add(buffer.getMemorySegment()); } else { buffer.recycleBuffer(); } } catch (Exception e) { err = firstOrSuppressed(e, err); } } try { synchronized (bufferQueue) { bufferQueue.releaseAll(exclusiveRecyclingSegments); bufferQueue.notifyAll(); } } catch (Exception e) { err = firstOrSuppressed(e, err); } try { if (exclusiveRecyclingSegments.size() > 0) { globalPool.recycleMemorySegments(exclusiveRecyclingSegments); } } catch (Exception e) { err = firstOrSuppressed(e, err); } if (err != null) { throw err instanceof IOException ? (IOException) err : new IOException(err); } } /** * The buffer pool notifies this listener of an available floating buffer. If the listener is * released or currently does not need extra buffers, the buffer should be returned to the * buffer pool. Otherwise, the buffer will be added into the <tt>bufferQueue</tt>. * * @param buffer Buffer that becomes available in buffer pool. * @return NotificationResult indicates whether this channel accepts the buffer and is waiting * for more floating buffers. 
*/ @Override public BufferListener.NotificationResult notifyBufferAvailable(Buffer buffer) { BufferListener.NotificationResult notificationResult = BufferListener.NotificationResult.BUFFER_NOT_USED; if (inputChannel.isReleased()) { return notificationResult; } try { synchronized (bufferQueue) { checkState( isWaitingForFloatingBuffers, "This channel should be waiting for floating buffers."); if (inputChannel.isReleased() || bufferQueue.getAvailableBufferSize() >= numRequiredBuffers) { isWaitingForFloatingBuffers = false; return notificationResult; } bufferQueue.addFloatingBuffer(buffer); bufferQueue.notifyAll(); if (bufferQueue.getAvailableBufferSize() == numRequiredBuffers) { isWaitingForFloatingBuffers = false; notificationResult = BufferListener.NotificationResult.BUFFER_USED_NO_NEED_MORE; } else { notificationResult = BufferListener.NotificationResult.BUFFER_USED_NEED_MORE; } } inputChannel.notifyBufferAvailable(1); } catch (Throwable t) { inputChannel.setError(t); } return notificationResult; } @Override public void notifyBufferDestroyed() { } @VisibleForTesting int unsynchronizedGetNumberOfRequiredBuffers() { return numRequiredBuffers; } @VisibleForTesting boolean unsynchronizedIsWaitingForFloatingBuffers() { return isWaitingForFloatingBuffers; } @VisibleForTesting int getNumberOfAvailableBuffers() { synchronized (bufferQueue) { return bufferQueue.getAvailableBufferSize(); } } int unsynchronizedGetAvailableExclusiveBuffers() { return bufferQueue.exclusiveBuffers.size(); } int unsynchronizedGetFloatingBuffersAvailable() { return bufferQueue.floatingBuffers.size(); } /** * Manages the exclusive and floating buffers of this channel, and handles the internal buffer * related logic. */ static final class AvailableBufferQueue { /** The current available floating buffers from the fixed buffer pool. */ final ArrayDeque<Buffer> floatingBuffers; /** The current available exclusive buffers from the global buffer pool. */ final ArrayDeque<Buffer> exclusiveBuffers; AvailableBufferQueue() { this.exclusiveBuffers = new ArrayDeque<>(); this.floatingBuffers = new ArrayDeque<>(); } /** * Adds an exclusive buffer (back) into the queue and releases one floating buffer if the * number of available buffers in queue is more than the required amount. If floating buffer * is released, the total amount of available buffers after adding this exclusive buffer has * not changed, and no new buffers are available. The caller is responsible for recycling * the release/returned floating buffer. * * @param buffer The exclusive buffer to add * @param numRequiredBuffers The number of required buffers * @return An released floating buffer, may be null if the numRequiredBuffers is not met. */ @Nullable Buffer addExclusiveBuffer(Buffer buffer, int numRequiredBuffers) { exclusiveBuffers.add(buffer); if (getAvailableBufferSize() > numRequiredBuffers) { return floatingBuffers.poll(); } return null; } void addFloatingBuffer(Buffer buffer) { floatingBuffers.add(buffer); } /** * Takes the floating buffer first in order to make full use of floating buffers reasonably. * * @return An available floating or exclusive buffer, may be null if the channel is * released. */ @Nullable Buffer takeBuffer() { if (floatingBuffers.size() > 0) { return floatingBuffers.poll(); } else { return exclusiveBuffers.poll(); } } /** * The floating buffer is recycled to local buffer pool directly, and the exclusive buffer * will be gathered to return to global buffer pool later. 
* * @param exclusiveSegments The list that we will add exclusive segments into. */ void releaseAll(List<MemorySegment> exclusiveSegments) { Buffer buffer; while ((buffer = floatingBuffers.poll()) != null) { buffer.recycleBuffer(); } while ((buffer = exclusiveBuffers.poll()) != null) { exclusiveSegments.add(buffer.getMemorySegment()); } } Queue<Buffer> clearFloatingBuffers() { Queue<Buffer> buffers = new ArrayDeque<>(floatingBuffers); floatingBuffers.clear(); return buffers; } int getAvailableBufferSize() { return floatingBuffers.size() + exclusiveBuffers.size(); } } }
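The agreed fix shows up in the method_body_after above as a `checkArgument` plus a `checkState` on floating buffers. A minimal sketch of the argument guard in the same precondition style (assuming Flink's `org.apache.flink.util.Preconditions`, which defines the `checkArgument`/`checkState` calls used above):

```java
import static org.apache.flink.util.Preconditions.checkArgument;

final class ExclusiveBufferGuard {
    // Fails loudly on a negative count instead of silently returning;
    // zero stays a valid no-op, matching requestExclusiveBuffers above.
    static void validate(int numExclusiveBuffers) {
        checkArgument(numExclusiveBuffers >= 0,
                "Num exclusive buffers must be non-negative.");
    }
}
```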
Do you mean that it should use the disk usage statistics instead of the data size?
public void init() throws LoadBalanceException { Backend be = infoService.getBackend(beId); if (be == null) { throw new LoadBalanceException("backend " + beId + " does not exist"); } isAvailable = be.isScheduleAvailable() && be.isLoadAvailable() && be.isQueryAvailable(); ImmutableMap<String, DiskInfo> disks = be.getDisks(); for (DiskInfo diskInfo : disks.values()) { TStorageMedium medium = diskInfo.getStorageMedium(); if (diskInfo.getState() == DiskState.ONLINE) { totalCapacityMap.put(medium, totalCapacityMap.getOrDefault(medium, 0L) + diskInfo.getTotalCapacityB()); totalUsedCapacityMap.put(medium, totalUsedCapacityMap.getOrDefault(medium, 0L) + (diskInfo.getTotalCapacityB() - diskInfo.getAvailableCapacityB())); } RootPathLoadStatistic pathStatistic = new RootPathLoadStatistic(beId, diskInfo.getRootPath(), diskInfo.getPathHash(), diskInfo.getStorageMedium(), diskInfo.getTotalCapacityB(), diskInfo.getDiskUsedCapacityB(), diskInfo.getState()); pathStatistics.add(pathStatistic); } totalReplicaNumMap = invertedIndex.getReplicaNumByBeIdAndStorageMedium(beId); for (TStorageMedium medium : TStorageMedium.values()) { if (!hasMedium(medium)) { totalReplicaNumMap.put(medium, 0L); } } for (TStorageMedium storageMedium : TStorageMedium.values()) { classifyPathByLoad(storageMedium); } Collections.sort(pathStatistics); }
diskInfo.getTotalCapacityB(), diskInfo.getDiskUsedCapacityB(), diskInfo.getState());
public void init() throws LoadBalanceException { Backend be = infoService.getBackend(beId); if (be == null) { throw new LoadBalanceException("backend " + beId + " does not exist"); } isAvailable = be.isScheduleAvailable() && be.isLoadAvailable() && be.isQueryAvailable(); ImmutableMap<String, DiskInfo> disks = be.getDisks(); for (DiskInfo diskInfo : disks.values()) { TStorageMedium medium = diskInfo.getStorageMedium(); if (diskInfo.getState() == DiskState.ONLINE) { totalCapacityMap.put(medium, totalCapacityMap.getOrDefault(medium, 0L) + diskInfo.getTotalCapacityB()); totalUsedCapacityMap.put(medium, totalUsedCapacityMap.getOrDefault(medium, 0L) + (diskInfo.getTotalCapacityB() - diskInfo.getAvailableCapacityB())); } RootPathLoadStatistic pathStatistic = new RootPathLoadStatistic(beId, diskInfo.getRootPath(), diskInfo.getPathHash(), diskInfo.getStorageMedium(), diskInfo.getTotalCapacityB(), diskInfo.getDiskUsedCapacityB(), diskInfo.getState()); pathStatistics.add(pathStatistic); } totalReplicaNumMap = invertedIndex.getReplicaNumByBeIdAndStorageMedium(beId); for (TStorageMedium medium : TStorageMedium.values()) { if (!hasMedium(medium)) { totalReplicaNumMap.put(medium, 0L); } } for (TStorageMedium storageMedium : TStorageMedium.values()) { classifyPathByLoad(storageMedium); } Collections.sort(pathStatistics); }
class LoadScore { public double replicaNumCoefficient = 0.5; public double capacityCoefficient = 0.5; public double score = 0.0; public static final LoadScore DUMMY = new LoadScore(); }
class LoadScore { public double replicaNumCoefficient = 0.5; public double capacityCoefficient = 0.5; public double score = 0.0; public static final LoadScore DUMMY = new LoadScore(); }
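The clarifying question above is about which quantity feeds `RootPathLoadStatistic`: the code passes `diskInfo.getDiskUsedCapacityB()`, i.e. the disk's actual usage, rather than a figure derived from the stored data. A tiny sketch of the resulting ratio (all numbers hypothetical):

```java
final class DiskUsageDemo {
    public static void main(String[] args) {
        // Hypothetical capacities: the statistic is based on what the disk
        // reports as used, not on the size of the data the system wrote.
        long totalCapacityB = 1_000_000_000_000L;  // 1 TB disk
        long diskUsedCapacityB = 400_000_000_000L; // 400 GB in use on the disk
        double usedRatio = (double) diskUsedCapacityB / totalCapacityB;
        System.out.printf("disk used: %.1f%%%n", usedRatio * 100); // disk used: 40.0%
    }
}
```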
If an old version forwards to a new version, request.catalog will be null. Will it throw an NPE?
public TMasterOpResult proxyExecute(TMasterOpRequest request) { ctx.setCurrentCatalog(request.catalog); ctx.setDatabase(request.db); ctx.setQualifiedUser(request.user); ctx.setGlobalStateMgr(GlobalStateMgr.getCurrentState()); ctx.getState().reset(); if (request.isSetResourceInfo()) { ctx.getSessionVariable().setResourceGroup(request.getResourceInfo().getGroup()); } if (request.isSetUser_ip()) { ctx.setRemoteIP(request.getUser_ip()); } if (request.isSetTime_zone()) { ctx.getSessionVariable().setTimeZone(request.getTime_zone()); } if (request.isSetStmt_id()) { ctx.setForwardedStmtId(request.getStmt_id()); } if (request.isSetSqlMode()) { ctx.getSessionVariable().setSqlMode(request.sqlMode); } if (request.isSetEnableStrictMode()) { ctx.getSessionVariable().setEnableInsertStrict(request.enableStrictMode); } if (request.isSetCurrent_user_ident()) { UserIdentity currentUserIdentity = UserIdentity.fromThrift(request.getCurrent_user_ident()); ctx.setCurrentUserIdentity(currentUserIdentity); } if (request.isSetIsLastStmt()) { ctx.setIsLastStmt(request.isIsLastStmt()); } else { ctx.setIsLastStmt(true); } if (request.isSetQuery_options()) { TQueryOptions queryOptions = request.getQuery_options(); if (queryOptions.isSetMem_limit()) { ctx.getSessionVariable().setMaxExecMemByte(queryOptions.getMem_limit()); } if (queryOptions.isSetQuery_timeout()) { ctx.getSessionVariable().setQueryTimeoutS(queryOptions.getQuery_timeout()); } if (queryOptions.isSetLoad_mem_limit()) { ctx.getSessionVariable().setLoadMemLimit(queryOptions.getLoad_mem_limit()); } if (queryOptions.isSetMax_scan_key_num()) { ctx.getSessionVariable().setMaxScanKeyNum(queryOptions.getMax_scan_key_num()); } if (queryOptions.isSetMax_pushdown_conditions_per_column()) { ctx.getSessionVariable().setMaxPushdownConditionsPerColumn( queryOptions.getMax_pushdown_conditions_per_column()); } } else { if (request.isSetExecMemLimit()) { ctx.getSessionVariable().setMaxExecMemByte(request.getExecMemLimit()); } if (request.isSetQueryTimeout()) { ctx.getSessionVariable().setQueryTimeoutS(request.getQueryTimeout()); } if (request.isSetLoadMemLimit()) { ctx.getSessionVariable().setLoadMemLimit(request.loadMemLimit); } } if (request.isSetQueryId()) { ctx.setQueryId(UUIDUtil.fromTUniqueid(request.getQueryId())); } ctx.setThreadLocalInfo(); if (ctx.getCurrentUserIdentity() == null) { TMasterOpResult result = new TMasterOpResult(); ctx.getState().setError( "Missing current user identity. You need to upgrade this Frontend to the same version as Leader Frontend."); result.setMaxJournalId(GlobalStateMgr.getCurrentState().getMaxJournalId()); result.setPacket(getResultPacket()); return result; } StmtExecutor executor = null; try { if (request.isSetModified_variables_sql()) { LOG.info("Set session variables first: {}", request.modified_variables_sql); new StmtExecutor(ctx, new OriginStatement(request.modified_variables_sql, 0), true).execute(); } int idx = request.isSetStmtIdx() ? 
request.getStmtIdx() : 0; executor = new StmtExecutor(ctx, new OriginStatement(request.getSql(), idx), true); executor.execute(); } catch (IOException e) { LOG.warn("Process one query failed because IOException: ", e); ctx.getState().setError("StarRocks process failed: " + e.getMessage()); } catch (Throwable e) { LOG.warn("Process one query failed because unknown reason: ", e); ctx.getState().setError("Unexpected exception: " + e.getMessage()); } TMasterOpResult result = new TMasterOpResult(); result.setMaxJournalId(GlobalStateMgr.getCurrentState().getMaxJournalId()); if (!ctx.getIsLastStmt() && ctx.getState().getStateType() != QueryState.MysqlStateType.ERR) { ctx.getState().serverStatus |= MysqlServerStatusFlag.SERVER_MORE_RESULTS_EXISTS; } result.setPacket(getResultPacket()); result.setState(ctx.getState().getStateType().toString()); if (executor != null) { if (executor.getProxyResultSet() != null) { result.setResultSet(executor.getProxyResultSet().tothrift()); } else if (executor.getProxyResultBuffer() != null) { result.setChannelBufferList(executor.getProxyResultBuffer()); } } return result; }
ctx.setCurrentCatalog(request.catalog);
public TMasterOpResult proxyExecute(TMasterOpRequest request) { ctx.setCurrentCatalog(request.catalog); if (ctx.getCurrentCatalog() == null) { TMasterOpResult result = new TMasterOpResult(); ctx.getState().setError( "Missing current catalog. You need to upgrade this Frontend to the same version as Leader Frontend."); result.setMaxJournalId(GlobalStateMgr.getCurrentState().getMaxJournalId()); result.setPacket(getResultPacket()); return result; } ctx.setDatabase(request.db); ctx.setQualifiedUser(request.user); ctx.setGlobalStateMgr(GlobalStateMgr.getCurrentState()); ctx.getState().reset(); if (request.isSetResourceInfo()) { ctx.getSessionVariable().setResourceGroup(request.getResourceInfo().getGroup()); } if (request.isSetUser_ip()) { ctx.setRemoteIP(request.getUser_ip()); } if (request.isSetTime_zone()) { ctx.getSessionVariable().setTimeZone(request.getTime_zone()); } if (request.isSetStmt_id()) { ctx.setForwardedStmtId(request.getStmt_id()); } if (request.isSetSqlMode()) { ctx.getSessionVariable().setSqlMode(request.sqlMode); } if (request.isSetEnableStrictMode()) { ctx.getSessionVariable().setEnableInsertStrict(request.enableStrictMode); } if (request.isSetCurrent_user_ident()) { UserIdentity currentUserIdentity = UserIdentity.fromThrift(request.getCurrent_user_ident()); ctx.setCurrentUserIdentity(currentUserIdentity); } if (request.isSetIsLastStmt()) { ctx.setIsLastStmt(request.isIsLastStmt()); } else { ctx.setIsLastStmt(true); } if (request.isSetQuery_options()) { TQueryOptions queryOptions = request.getQuery_options(); if (queryOptions.isSetMem_limit()) { ctx.getSessionVariable().setMaxExecMemByte(queryOptions.getMem_limit()); } if (queryOptions.isSetQuery_timeout()) { ctx.getSessionVariable().setQueryTimeoutS(queryOptions.getQuery_timeout()); } if (queryOptions.isSetLoad_mem_limit()) { ctx.getSessionVariable().setLoadMemLimit(queryOptions.getLoad_mem_limit()); } if (queryOptions.isSetMax_scan_key_num()) { ctx.getSessionVariable().setMaxScanKeyNum(queryOptions.getMax_scan_key_num()); } if (queryOptions.isSetMax_pushdown_conditions_per_column()) { ctx.getSessionVariable().setMaxPushdownConditionsPerColumn( queryOptions.getMax_pushdown_conditions_per_column()); } } else { if (request.isSetExecMemLimit()) { ctx.getSessionVariable().setMaxExecMemByte(request.getExecMemLimit()); } if (request.isSetQueryTimeout()) { ctx.getSessionVariable().setQueryTimeoutS(request.getQueryTimeout()); } if (request.isSetLoadMemLimit()) { ctx.getSessionVariable().setLoadMemLimit(request.loadMemLimit); } } if (request.isSetQueryId()) { ctx.setQueryId(UUIDUtil.fromTUniqueid(request.getQueryId())); } ctx.setThreadLocalInfo(); if (ctx.getCurrentUserIdentity() == null) { TMasterOpResult result = new TMasterOpResult(); ctx.getState().setError( "Missing current user identity. You need to upgrade this Frontend to the same version as Leader Frontend."); result.setMaxJournalId(GlobalStateMgr.getCurrentState().getMaxJournalId()); result.setPacket(getResultPacket()); return result; } StmtExecutor executor = null; try { if (request.isSetModified_variables_sql()) { LOG.info("Set session variables first: {}", request.modified_variables_sql); new StmtExecutor(ctx, new OriginStatement(request.modified_variables_sql, 0), true).execute(); } int idx = request.isSetStmtIdx() ? 
request.getStmtIdx() : 0; executor = new StmtExecutor(ctx, new OriginStatement(request.getSql(), idx), true); executor.execute(); } catch (IOException e) { LOG.warn("Process one query failed because IOException: ", e); ctx.getState().setError("StarRocks process failed: " + e.getMessage()); } catch (Throwable e) { LOG.warn("Process one query failed because unknown reason: ", e); ctx.getState().setError("Unexpected exception: " + e.getMessage()); } TMasterOpResult result = new TMasterOpResult(); result.setMaxJournalId(GlobalStateMgr.getCurrentState().getMaxJournalId()); if (!ctx.getIsLastStmt() && ctx.getState().getStateType() != QueryState.MysqlStateType.ERR) { ctx.getState().serverStatus |= MysqlServerStatusFlag.SERVER_MORE_RESULTS_EXISTS; } result.setPacket(getResultPacket()); result.setState(ctx.getState().getStateType().toString()); if (executor != null) { if (executor.getProxyResultSet() != null) { result.setResultSet(executor.getProxyResultSet().tothrift()); } else if (executor.getProxyResultBuffer() != null) { result.setChannelBufferList(executor.getProxyResultBuffer()); } } return result; }
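The fix above answers the rolling-upgrade concern: setting a null catalog does not itself throw, and the explicit null check returns an upgrade-version error instead of letting an NPE surface later. As an alternative defensive form on the request side, here is a sketch using the probe methods Thrift generates for fields (`isSetCatalog()`/`getCatalog()` are assumed to be the generated accessors for the `catalog` field; this is not the code above):

```java
// Thrift-generated structs expose isSetX() for every field, so the guard can
// also be phrased on the request before touching the context (a sketch under
// the assumption stated above about Thrift's Java code generation).
String catalog = request.isSetCatalog() ? request.getCatalog() : null;
ctx.setCurrentCatalog(catalog);
if (catalog == null) {
    // Reject with the same "upgrade this Frontend" error as in the fix above.
}
```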
class ConnectProcessor { private static final Logger LOG = LogManager.getLogger(ConnectProcessor.class); private final ConnectContext ctx; private ByteBuffer packetBuf; private StmtExecutor executor = null; public ConnectProcessor(ConnectContext context) { this.ctx = context; } private void handleInitDb() { String identifier = new String(packetBuf.array(), 1, packetBuf.limit() - 1); try { String[] parts = identifier.trim().split("\\s+"); if (parts.length == 2) { if (parts[0].equalsIgnoreCase("catalog")) { ctx.getGlobalStateMgr().changeCatalog(ctx, parts[1]); } else if (parts[0].equalsIgnoreCase("warehouse")) { ctx.getGlobalStateMgr().changeWarehouse(ctx, parts[1]); } else { ctx.getState().setError("not supported command"); } } else { ctx.getGlobalStateMgr().changeCatalogDb(ctx, identifier); } } catch (Exception e) { ctx.getState().setError(e.getMessage()); return; } ctx.getState().setOk(); } private void handleQuit() { ctx.setKilled(); ctx.getState().setOk(); } private void handleChangeUser() throws IOException { if (!MysqlProto.changeUser(ctx, packetBuf)) { LOG.warn("Failed to execute command `Change user`."); return; } handleResetConnection(); } private void handleResetConnection() throws IOException { resetConnectionSession(); ctx.getState().setOk(); } private void handlePing() { ctx.getState().setOk(); } private void resetConnectionSession() { ctx.getSerializer().reset(); ctx.getSerializer().setCapability(ctx.getCapability()); ctx.resetSessionVariable(); } public void auditAfterExec(String origStmt, StatementBase parsedStmt, PQueryStatistics statistics) { long endTime = System.currentTimeMillis(); long elapseMs = endTime - ctx.getStartTime(); ctx.getAuditEventBuilder().setEventType(EventType.AFTER_QUERY) .setState(ctx.getState().toString()).setErrorCode(ctx.getErrorCode()).setQueryTime(elapseMs) .setReturnRows(ctx.getReturnRows()) .setStmtId(ctx.getStmtId()) .setQueryId(ctx.getQueryId() == null ? "NaN" : ctx.getQueryId().toString()); if (statistics != null) { ctx.getAuditEventBuilder().setScanBytes(statistics.scanBytes); ctx.getAuditEventBuilder().setScanRows(statistics.scanRows); ctx.getAuditEventBuilder().setCpuCostNs(statistics.cpuCostNs == null ? -1 : statistics.cpuCostNs); ctx.getAuditEventBuilder().setMemCostBytes(statistics.memCostBytes == null ? 
-1 : statistics.memCostBytes); } if (ctx.getState().isQuery()) { MetricRepo.COUNTER_QUERY_ALL.increase(1L); ResourceGroupMetricMgr.increaseQuery(ctx, 1L); if (ctx.getState().getStateType() == QueryState.MysqlStateType.ERR) { MetricRepo.COUNTER_QUERY_ERR.increase(1L); ResourceGroupMetricMgr.increaseQueryErr(ctx, 1L); } else { MetricRepo.COUNTER_QUERY_SUCCESS.increase(1L); MetricRepo.HISTO_QUERY_LATENCY.update(elapseMs); ResourceGroupMetricMgr.updateQueryLatency(ctx, elapseMs); if (elapseMs > Config.qe_slow_log_ms || ctx.getSessionVariable().isEnableSQLDigest()) { MetricRepo.COUNTER_SLOW_QUERY.increase(1L); ctx.getAuditEventBuilder().setDigest(computeStatementDigest(parsedStmt)); } } ctx.getAuditEventBuilder().setIsQuery(true); if (ctx.getSessionVariable().isEnableBigQueryLog()) { ctx.getAuditEventBuilder().setBigQueryLogCPUSecondThreshold( ctx.getSessionVariable().getBigQueryLogCPUSecondThreshold()); ctx.getAuditEventBuilder().setBigQueryLogScanBytesThreshold( ctx.getSessionVariable().getBigQueryLogScanBytesThreshold()); ctx.getAuditEventBuilder().setBigQueryLogScanRowsThreshold( ctx.getSessionVariable().getBigQueryLogScanRowsThreshold()); } } else { ctx.getAuditEventBuilder().setIsQuery(false); } ctx.getAuditEventBuilder().setFeIp(FrontendOptions.getLocalHostAddress()); if (!ctx.getState().isQuery() && (parsedStmt != null && parsedStmt.needAuditEncryption())) { ctx.getAuditEventBuilder().setStmt(AstToStringBuilder.toString(parsedStmt)); } else if (ctx.getState().isQuery() && containsComment(origStmt)) { ctx.getAuditEventBuilder().setStmt(origStmt); } else { ctx.getAuditEventBuilder().setStmt(origStmt.replace("\n", " ")); } GlobalStateMgr.getCurrentAuditEventProcessor().handleAuditEvent(ctx.getAuditEventBuilder().build()); } public String computeStatementDigest(StatementBase queryStmt) { String digest = SqlDigestBuilder.build(queryStmt); try { MessageDigest md = MessageDigest.getInstance("MD5"); md.reset(); md.update(digest.getBytes()); return Hex.encodeHexString(md.digest()); } catch (NoSuchAlgorithmException e) { return ""; } } private boolean containsComment(String sql) { return (sql.contains("--")) || sql.contains(" } private void addFinishedQueryDetail() { if (!Config.enable_collect_query_detail_info) { return; } QueryDetail queryDetail = ctx.getQueryDetail(); if (queryDetail == null || !queryDetail.getQueryId().equals(DebugUtil.printId(ctx.getQueryId()))) { return; } long endTime = System.currentTimeMillis(); long elapseMs = endTime - ctx.getStartTime(); if (ctx.getState().getStateType() == QueryState.MysqlStateType.ERR) { queryDetail.setState(QueryDetail.QueryMemState.FAILED); queryDetail.setErrorMessage(ctx.getState().getErrorMessage()); } else { queryDetail.setState(QueryDetail.QueryMemState.FINISHED); } queryDetail.setEndTime(endTime); queryDetail.setLatency(elapseMs); queryDetail.setResourceGroupName(ctx.getResourceGroup() != null ? ctx.getResourceGroup().getName() : ""); QueryDetailQueue.addAndRemoveTimeoutQueryDetail(queryDetail); } private void addRunningQueryDetail(StatementBase parsedStmt) { if (!Config.enable_collect_query_detail_info) { return; } String sql = parsedStmt.getOrigStmt().originStmt; boolean isQuery = parsedStmt instanceof QueryStatement; QueryDetail queryDetail = new QueryDetail( DebugUtil.printId(ctx.getQueryId()), isQuery, ctx.connectionId, ctx.getMysqlChannel() != null ? 
ctx.getMysqlChannel().getRemoteIp() : "System", ctx.getStartTime(), -1, -1, QueryDetail.QueryMemState.RUNNING, ctx.getDatabase(), sql, ctx.getQualifiedUser(), Optional.ofNullable(ctx.getResourceGroup()).map(TWorkGroup::getName).orElse("")); ctx.setQueryDetail(queryDetail); QueryDetailQueue.addAndRemoveTimeoutQueryDetail(queryDetail.copy()); } private void handleQuery() { MetricRepo.COUNTER_REQUEST_ALL.increase(1L); String originStmt = null; byte[] bytes = packetBuf.array(); int ending = packetBuf.limit() - 1; while (ending >= 1 && bytes[ending] == '\0') { ending--; } originStmt = new String(bytes, 1, ending, StandardCharsets.UTF_8); ctx.getAuditEventBuilder().reset(); ctx.getAuditEventBuilder() .setTimestamp(System.currentTimeMillis()) .setClientIp(ctx.getMysqlChannel().getRemoteHostPortString()) .setUser(ctx.getQualifiedUser()) .setAuthorizedUser( ctx.getCurrentUserIdentity() == null ? "null" : ctx.getCurrentUserIdentity().toString()) .setDb(ctx.getDatabase()) .setCatalog(ctx.getCurrentCatalog()); ctx.getPlannerProfile().reset(); StatementBase parsedStmt = null; try { ctx.setQueryId(UUIDUtil.genUUID()); List<StatementBase> stmts; try { stmts = com.starrocks.sql.parser.SqlParser.parse(originStmt, ctx.getSessionVariable()); } catch (ParsingException parsingException) { throw new AnalysisException(parsingException.getMessage()); } for (int i = 0; i < stmts.size(); ++i) { ctx.getState().reset(); if (i > 0) { ctx.resetRetureRows(); ctx.setQueryId(UUIDUtil.genUUID()); } parsedStmt = stmts.get(i); parsedStmt.setOrigStmt(new OriginStatement(originStmt, i)); if (i == stmts.size() - 1) { addRunningQueryDetail(parsedStmt); } executor = new StmtExecutor(ctx, parsedStmt); ctx.setExecutor(executor); ctx.setIsLastStmt(i == stmts.size() - 1); executor.execute(); if (ctx.getState().getStateType() == QueryState.MysqlStateType.ERR) { break; } if (i != stmts.size() - 1) { ctx.getState().serverStatus |= MysqlServerStatusFlag.SERVER_MORE_RESULTS_EXISTS; finalizeCommand(); } } } catch (IOException e) { LOG.warn("Process one query failed because IOException: ", e); ctx.getState().setError("StarRocks process failed"); } catch (UserException e) { LOG.warn("Process one query failed because.", e); ctx.getState().setError(e.getMessage()); ctx.getState().setErrType(QueryState.ErrType.ANALYSIS_ERR); } catch (Throwable e) { LOG.warn("Process one query failed because unknown reason: ", e); ctx.getState().setError("Unexpected exception: " + e.getMessage()); if (parsedStmt instanceof KillStmt) { ctx.getState().setErrType(QueryState.ErrType.ANALYSIS_ERR); } } if (executor != null) { auditAfterExec(originStmt, executor.getParsedStmt(), executor.getQueryStatisticsForAuditLog()); } else { auditAfterExec(originStmt, null, null); } addFinishedQueryDetail(); } private void handleFieldList() throws IOException { String tableName = new String(MysqlProto.readNulTerminateString(packetBuf), StandardCharsets.UTF_8); if (Strings.isNullOrEmpty(tableName)) { ctx.getState().setError("Empty tableName"); return; } Database db = ctx.getGlobalStateMgr().getMetadataMgr().getDb(ctx.getCurrentCatalog(), ctx.getDatabase()); if (db == null) { ctx.getState().setError("Unknown database(" + ctx.getDatabase() + ")"); return; } db.readLock(); try { Table table = ctx.getGlobalStateMgr().getMetadataMgr().getTable( ctx.getCurrentCatalog(), ctx.getDatabase(), tableName); if (table == null) { ctx.getState().setError("Unknown table(" + tableName + ")"); return; } MysqlSerializer serializer = ctx.getSerializer(); MysqlChannel channel = ctx.getMysqlChannel(); 
List<Column> baseSchema = table.getBaseSchema(); for (Column column : baseSchema) { serializer.reset(); serializer.writeField(db.getOriginName(), table.getName(), column, true); channel.sendOnePacket(serializer.toByteBuffer()); } } catch (StarRocksIcebergException e) { LOG.error("errors happened when getting Iceberg table {}", tableName, e); } catch (StarRocksConnectorException e) { LOG.error("errors happened when getting table {}", tableName, e); } finally { db.readUnlock(); } ctx.getState().setEof(); } private void dispatch() throws IOException { int code = packetBuf.get(); MysqlCommand command = MysqlCommand.fromCode(code); if (command == null) { ErrorReport.report(ErrorCode.ERR_UNKNOWN_COM_ERROR); ctx.getState().setError("Unknown command(" + command + ")"); LOG.warn("Unknown command(" + command + ")"); return; } ctx.setCommand(command); ctx.setStartTime(); ctx.setResourceGroup(null); ctx.setErrorCode(""); switch (command) { case COM_INIT_DB: handleInitDb(); break; case COM_QUIT: handleQuit(); break; case COM_QUERY: handleQuery(); ctx.setStartTime(); break; case COM_FIELD_LIST: handleFieldList(); break; case COM_CHANGE_USER: handleChangeUser(); break; case COM_RESET_CONNECTION: handleResetConnection(); break; case COM_PING: handlePing(); break; default: ctx.getState().setError("Unsupported command(" + command + ")"); LOG.warn("Unsupported command(" + command + ")"); break; } } private ByteBuffer getResultPacket() { MysqlPacket packet = ctx.getState().toResponsePacket(); if (packet == null) { return null; } MysqlSerializer serializer = ctx.getSerializer(); serializer.reset(); packet.writeTo(serializer); return serializer.toByteBuffer(); } private void finalizeCommand() throws IOException { ByteBuffer packet = null; if (executor != null && executor.isForwardToLeader()) { if (ctx.getState().getStateType() == QueryState.MysqlStateType.ERR) { packet = executor.getOutputPacket(); if (packet == null) { packet = getResultPacket(); if (packet == null) { LOG.debug("packet == null"); return; } } } else { ShowResultSet resultSet = executor.getShowResultSet(); if (resultSet == null) { if (executor.sendResultToChannel(ctx.getMysqlChannel())) { packet = getResultPacket(); } else { packet = executor.getOutputPacket(); } } else { executor.sendShowResult(resultSet); packet = getResultPacket(); if (packet == null) { LOG.debug("packet == null"); return; } } } } else { packet = getResultPacket(); if (packet == null) { LOG.debug("packet == null"); return; } } MysqlChannel channel = ctx.getMysqlChannel(); channel.sendAndFlush(packet); if (ctx.getCommand() == MysqlCommand.COM_QUERY) { ctx.setLastQueryId(ctx.queryId); ctx.setQueryId(null); } } public void processOnce() throws IOException { ctx.getState().reset(); executor = null; final MysqlChannel channel = ctx.getMysqlChannel(); channel.setSequenceId(0); try { packetBuf = channel.fetchOnePacket(); if (packetBuf == null) { throw new IOException("Error happened when receiving packet."); } } catch (AsynchronousCloseException e) { return; } dispatch(); finalizeCommand(); ctx.setCommand(MysqlCommand.COM_SLEEP); } public void loop() { while (!ctx.isKilled()) { try { processOnce(); } catch (Exception e) { LOG.warn("Exception happened in one seesion(" + ctx + ").", e); ctx.setKilled(); break; } } } }
Printing `acquiredResources` can produce quite a lot of text. Hence, I am wondering whether we should really include this information in every `NoResourceAvailableException`, since it will be logged for every request later on. It might be better to log it once in this method and then fail the requests without this information. ``` log.info("Could not acquire the minimum required resources. Failing all pending requests. Acquired {}. Current slot pool status: {}.", acquiredResources, getSlotServiceStatus()); ``` What do you think @rkhachatryan?
private void failPendingRequests(Collection<ResourceRequirement> acquiredResources) { if (!pendingRequests.isEmpty()) { final NoResourceAvailableException cause = new NoResourceAvailableException( "Could not acquire the minimum required resources. Acquired: " + acquiredResources + ". Current slot pool status: " + getSlotServiceStatus()); cancelPendingRequests( request -> !isBatchSlotRequestTimeoutCheckDisabled || !request.isBatchRequest(), cause); } }
+ getSlotServiceStatus());
private void failPendingRequests(Collection<ResourceRequirement> acquiredResources) { if (!pendingRequests.isEmpty()) { final NoResourceAvailableException cause = new NoResourceAvailableException( "Could not acquire the minimum required resources. Acquired: " + acquiredResources + ". Current slot pool status: " + getSlotServiceStatus()); cancelPendingRequests( request -> !isBatchSlotRequestTimeoutCheckDisabled || !request.isBatchRequest(), cause); } }
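A minimal sketch of the alternative suggested above, assuming the `DeclarativeSlotPoolBridge` fields and helpers shown in this row (`pendingRequests`, `cancelPendingRequests`, `getSlotServiceStatus`, the inherited `log`): the verbose details are logged once, and the pending requests are failed with a short message. The exact log wording is illustrative, not a committed change.
```
private void failPendingRequests(Collection<ResourceRequirement> acquiredResources) {
    if (!pendingRequests.isEmpty()) {
        // Log the potentially large details once instead of embedding them in
        // every NoResourceAvailableException handed to the pending requests.
        log.info(
                "Could not acquire the minimum required resources. Failing all pending requests."
                        + " Acquired: {}. Current slot pool status: {}.",
                acquiredResources,
                getSlotServiceStatus());
        final NoResourceAvailableException cause =
                new NoResourceAvailableException(
                        "Could not acquire the minimum required resources.");
        cancelPendingRequests(
                request -> !isBatchSlotRequestTimeoutCheckDisabled || !request.isBatchRequest(),
                cause);
    }
}
```
This keeps the per-request exception cheap while still recording the pool status once per failure event.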
class DeclarativeSlotPoolBridge extends DeclarativeSlotPoolService implements SlotPool { private final Map<SlotRequestId, PendingRequest> pendingRequests; private final Map<SlotRequestId, AllocationID> fulfilledRequests; private final Time idleSlotTimeout; @Nullable private ComponentMainThreadExecutor componentMainThreadExecutor; private final Time batchSlotTimeout; private boolean isBatchSlotRequestTimeoutCheckDisabled; public DeclarativeSlotPoolBridge( JobID jobId, DeclarativeSlotPoolFactory declarativeSlotPoolFactory, Clock clock, Time rpcTimeout, Time idleSlotTimeout, Time batchSlotTimeout) { super(jobId, declarativeSlotPoolFactory, clock, idleSlotTimeout, rpcTimeout); this.idleSlotTimeout = idleSlotTimeout; this.batchSlotTimeout = Preconditions.checkNotNull(batchSlotTimeout); this.isBatchSlotRequestTimeoutCheckDisabled = false; this.pendingRequests = new LinkedHashMap<>(); this.fulfilledRequests = new HashMap<>(); } @Override public <T> Optional<T> castInto(Class<T> clazz) { if (clazz.isAssignableFrom(getClass())) { return Optional.of(clazz.cast(this)); } return Optional.empty(); } @Override protected void onStart(ComponentMainThreadExecutor componentMainThreadExecutor) { this.componentMainThreadExecutor = componentMainThreadExecutor; getDeclarativeSlotPool().registerNewSlotsListener(this::newSlotsAreAvailable); componentMainThreadExecutor.schedule( this::checkIdleSlotTimeout, idleSlotTimeout.toMilliseconds(), TimeUnit.MILLISECONDS); componentMainThreadExecutor.schedule( this::checkBatchSlotTimeout, batchSlotTimeout.toMilliseconds(), TimeUnit.MILLISECONDS); } @Override protected void onClose() { final FlinkException cause = new FlinkException("Closing slot pool"); cancelPendingRequests(request -> true, cause); } private void cancelPendingRequests( Predicate<PendingRequest> requestPredicate, FlinkException cancelCause) { ResourceCounter decreasedResourceRequirements = ResourceCounter.empty(); final Iterable<PendingRequest> pendingRequestsToFail = new ArrayList<>(pendingRequests.values()); pendingRequests.clear(); for (PendingRequest pendingRequest : pendingRequestsToFail) { if (requestPredicate.test(pendingRequest)) { pendingRequest.failRequest(cancelCause); decreasedResourceRequirements = decreasedResourceRequirements.add(pendingRequest.getResourceProfile(), 1); } else { pendingRequests.put(pendingRequest.getSlotRequestId(), pendingRequest); } } getDeclarativeSlotPool().decreaseResourceRequirementsBy(decreasedResourceRequirements); } @Override protected void onReleaseTaskManager(ResourceCounter previouslyFulfilledRequirement) { getDeclarativeSlotPool().decreaseResourceRequirementsBy(previouslyFulfilledRequirement); } @VisibleForTesting void newSlotsAreAvailable(Collection<? 
extends PhysicalSlot> newSlots) { final Collection<PendingRequestSlotMatching> matchingsToFulfill = new ArrayList<>(); for (PhysicalSlot newSlot : newSlots) { final Optional<PendingRequest> matchingPendingRequest = findMatchingPendingRequest(newSlot); matchingPendingRequest.ifPresent( pendingRequest -> { Preconditions.checkNotNull( pendingRequests.remove(pendingRequest.getSlotRequestId()), "Cannot fulfill a non existing pending slot request."); reserveFreeSlot( pendingRequest.getSlotRequestId(), newSlot.getAllocationId(), pendingRequest.resourceProfile); matchingsToFulfill.add( PendingRequestSlotMatching.createFor(pendingRequest, newSlot)); }); } for (PendingRequestSlotMatching pendingRequestSlotMatching : matchingsToFulfill) { pendingRequestSlotMatching.fulfillPendingRequest(); } } private void reserveFreeSlot( SlotRequestId slotRequestId, AllocationID allocationId, ResourceProfile resourceProfile) { log.debug("Reserve slot {} for slot request id {}", allocationId, slotRequestId); getDeclarativeSlotPool().reserveFreeSlot(allocationId, resourceProfile); fulfilledRequests.put(slotRequestId, allocationId); } private Optional<PendingRequest> findMatchingPendingRequest(PhysicalSlot slot) { final ResourceProfile resourceProfile = slot.getResourceProfile(); for (PendingRequest pendingRequest : pendingRequests.values()) { if (resourceProfile.isMatching(pendingRequest.getResourceProfile())) { log.debug("Matched slot {} to pending request {}.", slot, pendingRequest); return Optional.of(pendingRequest); } } log.debug("Could not match slot {} to any pending request.", slot); return Optional.empty(); } @Override public Optional<PhysicalSlot> allocateAvailableSlot( @Nonnull SlotRequestId slotRequestId, @Nonnull AllocationID allocationID, @Nonnull ResourceProfile requirementProfile) { assertRunningInMainThread(); Preconditions.checkNotNull(requirementProfile, "The requiredSlotProfile must not be null."); log.debug( "Reserving free slot {} for slot request id {} and profile {}.", allocationID, slotRequestId, requirementProfile); return Optional.of( reserveFreeSlotForResource(slotRequestId, allocationID, requirementProfile)); } private PhysicalSlot reserveFreeSlotForResource( SlotRequestId slotRequestId, AllocationID allocationId, ResourceProfile requiredSlotProfile) { getDeclarativeSlotPool() .increaseResourceRequirementsBy( ResourceCounter.withResource(requiredSlotProfile, 1)); final PhysicalSlot physicalSlot = getDeclarativeSlotPool().reserveFreeSlot(allocationId, requiredSlotProfile); fulfilledRequests.put(slotRequestId, allocationId); return physicalSlot; } @Override @Nonnull public CompletableFuture<PhysicalSlot> requestNewAllocatedSlot( @Nonnull SlotRequestId slotRequestId, @Nonnull ResourceProfile resourceProfile, @Nullable Time timeout) { assertRunningInMainThread(); log.debug( "Request new allocated slot with slot request id {} and resource profile {}", slotRequestId, resourceProfile); final PendingRequest pendingRequest = PendingRequest.createNormalRequest(slotRequestId, resourceProfile); return internalRequestNewSlot(pendingRequest, timeout); } @Override @Nonnull public CompletableFuture<PhysicalSlot> requestNewAllocatedBatchSlot( @Nonnull SlotRequestId slotRequestId, @Nonnull ResourceProfile resourceProfile) { assertRunningInMainThread(); log.debug( "Request new allocated batch slot with slot request id {} and resource profile {}", slotRequestId, resourceProfile); final PendingRequest pendingRequest = PendingRequest.createBatchRequest(slotRequestId, resourceProfile); return 
internalRequestNewSlot(pendingRequest, null); } private CompletableFuture<PhysicalSlot> internalRequestNewSlot( PendingRequest pendingRequest, @Nullable Time timeout) { internalRequestNewAllocatedSlot(pendingRequest); if (timeout == null) { return pendingRequest.getSlotFuture(); } else { return FutureUtils.orTimeout( pendingRequest.getSlotFuture(), timeout.toMilliseconds(), TimeUnit.MILLISECONDS, componentMainThreadExecutor) .whenComplete( (physicalSlot, throwable) -> { if (throwable instanceof TimeoutException) { timeoutPendingSlotRequest(pendingRequest.getSlotRequestId()); } }); } } private void timeoutPendingSlotRequest(SlotRequestId slotRequestId) { releaseSlot( slotRequestId, new TimeoutException("Pending slot request timed out in slot pool.")); } private void internalRequestNewAllocatedSlot(PendingRequest pendingRequest) { pendingRequests.put(pendingRequest.getSlotRequestId(), pendingRequest); getDeclarativeSlotPool() .increaseResourceRequirementsBy( ResourceCounter.withResource(pendingRequest.getResourceProfile(), 1)); } @Override public Optional<ResourceID> failAllocation(AllocationID allocationID, Exception cause) { throw new UnsupportedOperationException( "Please call failAllocation(ResourceID, AllocationID, Exception)"); } @Override protected void onFailAllocation(ResourceCounter previouslyFulfilledRequirements) { getDeclarativeSlotPool().decreaseResourceRequirementsBy(previouslyFulfilledRequirements); } @Override public void releaseSlot(@Nonnull SlotRequestId slotRequestId, @Nullable Throwable cause) { log.debug("Release slot with slot request id {}", slotRequestId); assertRunningInMainThread(); final PendingRequest pendingRequest = pendingRequests.remove(slotRequestId); if (pendingRequest != null) { getDeclarativeSlotPool() .decreaseResourceRequirementsBy( ResourceCounter.withResource(pendingRequest.getResourceProfile(), 1)); pendingRequest.failRequest( new FlinkException( String.format( "Pending slot request with %s has been released.", pendingRequest.getSlotRequestId()), cause)); } else { final AllocationID allocationId = fulfilledRequests.remove(slotRequestId); if (allocationId != null) { ResourceCounter previouslyFulfilledRequirement = getDeclarativeSlotPool() .freeReservedSlot(allocationId, cause, getRelativeTimeMillis()); getDeclarativeSlotPool() .decreaseResourceRequirementsBy(previouslyFulfilledRequirement); } else { log.debug( "Could not find slot which has fulfilled slot request {}. Ignoring the release operation.", slotRequestId); } } } @Override public void notifyNotEnoughResourcesAvailable( Collection<ResourceRequirement> acquiredResources) { assertRunningInMainThread(); failPendingRequests(acquiredResources); } @Override public Collection<SlotInfo> getAllocatedSlotsInformation() { assertRunningInMainThread(); final Collection<? 
extends SlotInfo> allSlotsInformation = getDeclarativeSlotPool().getAllSlotsInformation(); final Set<AllocationID> freeSlots = getDeclarativeSlotPool().getFreeSlotsInformation().stream() .map(SlotInfoWithUtilization::getAllocationId) .collect(Collectors.toSet()); return allSlotsInformation.stream() .filter(slotInfo -> !freeSlots.contains(slotInfo.getAllocationId())) .collect(Collectors.toList()); } @Override @Nonnull public Collection<SlotInfoWithUtilization> getAvailableSlotsInformation() { assertRunningInMainThread(); return getDeclarativeSlotPool().getFreeSlotsInformation(); } @Override public void disableBatchSlotRequestTimeoutCheck() { isBatchSlotRequestTimeoutCheckDisabled = true; } private void assertRunningInMainThread() { if (componentMainThreadExecutor != null) { componentMainThreadExecutor.assertRunningInMainThread(); } else { throw new IllegalStateException("The FutureSlotPool has not been started yet."); } } private void checkIdleSlotTimeout() { getDeclarativeSlotPool().releaseIdleSlots(getRelativeTimeMillis()); if (componentMainThreadExecutor != null) { componentMainThreadExecutor.schedule( this::checkIdleSlotTimeout, idleSlotTimeout.toMilliseconds(), TimeUnit.MILLISECONDS); } } private void checkBatchSlotTimeout() { if (isBatchSlotRequestTimeoutCheckDisabled) { return; } final Collection<PendingRequest> pendingBatchRequests = getPendingBatchRequests(); if (!pendingBatchRequests.isEmpty()) { final Set<ResourceProfile> allResourceProfiles = getResourceProfilesFromAllSlots(); final Map<Boolean, List<PendingRequest>> fulfillableAndUnfulfillableRequests = pendingBatchRequests.stream() .collect( Collectors.partitioningBy( canBeFulfilledWithAnySlot(allResourceProfiles))); final List<PendingRequest> fulfillableRequests = fulfillableAndUnfulfillableRequests.get(true); final List<PendingRequest> unfulfillableRequests = fulfillableAndUnfulfillableRequests.get(false); final long currentTimestamp = getRelativeTimeMillis(); for (PendingRequest fulfillableRequest : fulfillableRequests) { fulfillableRequest.markFulfillable(); } for (PendingRequest unfulfillableRequest : unfulfillableRequests) { unfulfillableRequest.markUnfulfillable(currentTimestamp); if (unfulfillableRequest.getUnfulfillableSince() + batchSlotTimeout.toMilliseconds() <= currentTimestamp) { timeoutPendingSlotRequest(unfulfillableRequest.getSlotRequestId()); } } } if (componentMainThreadExecutor != null) { componentMainThreadExecutor.schedule( this::checkBatchSlotTimeout, batchSlotTimeout.toMilliseconds(), TimeUnit.MILLISECONDS); } } private Set<ResourceProfile> getResourceProfilesFromAllSlots() { return Stream.concat( getAvailableSlotsInformation().stream(), getAllocatedSlotsInformation().stream()) .map(SlotInfo::getResourceProfile) .collect(Collectors.toSet()); } private Collection<PendingRequest> getPendingBatchRequests() { return pendingRequests.values().stream() .filter(PendingRequest::isBatchRequest) .collect(Collectors.toList()); } private static Predicate<PendingRequest> canBeFulfilledWithAnySlot( Set<ResourceProfile> allocatedResourceProfiles) { return pendingRequest -> { for (ResourceProfile allocatedResourceProfile : allocatedResourceProfiles) { if (allocatedResourceProfile.isMatching(pendingRequest.getResourceProfile())) { return true; } } return false; }; } private static final class PendingRequest { private final SlotRequestId slotRequestId; private final ResourceProfile resourceProfile; private final CompletableFuture<PhysicalSlot> slotFuture; private final boolean isBatchRequest; private long 
unfulfillableSince; private PendingRequest( SlotRequestId slotRequestId, ResourceProfile resourceProfile, boolean isBatchRequest) { this.slotRequestId = slotRequestId; this.resourceProfile = resourceProfile; this.isBatchRequest = isBatchRequest; this.slotFuture = new CompletableFuture<>(); this.unfulfillableSince = Long.MAX_VALUE; } static PendingRequest createBatchRequest( SlotRequestId slotRequestId, ResourceProfile resourceProfile) { return new PendingRequest(slotRequestId, resourceProfile, true); } static PendingRequest createNormalRequest( SlotRequestId slotRequestId, ResourceProfile resourceProfile) { return new PendingRequest(slotRequestId, resourceProfile, false); } SlotRequestId getSlotRequestId() { return slotRequestId; } ResourceProfile getResourceProfile() { return resourceProfile; } CompletableFuture<PhysicalSlot> getSlotFuture() { return slotFuture; } void failRequest(Exception cause) { slotFuture.completeExceptionally(cause); } public boolean isBatchRequest() { return isBatchRequest; } public void markFulfillable() { this.unfulfillableSince = Long.MAX_VALUE; } public void markUnfulfillable(long currentTimestamp) { this.unfulfillableSince = currentTimestamp; } public long getUnfulfillableSince() { return unfulfillableSince; } public boolean fulfill(PhysicalSlot slot) { return slotFuture.complete(slot); } @Override public String toString() { return "PendingRequest{" + "slotRequestId=" + slotRequestId + ", resourceProfile=" + resourceProfile + ", isBatchRequest=" + isBatchRequest + ", unfulfillableSince=" + unfulfillableSince + '}'; } } private static final class PendingRequestSlotMatching { private final PendingRequest pendingRequest; private final PhysicalSlot matchedSlot; private PendingRequestSlotMatching( PendingRequest pendingRequest, PhysicalSlot matchedSlot) { this.pendingRequest = pendingRequest; this.matchedSlot = matchedSlot; } public static PendingRequestSlotMatching createFor( PendingRequest pendingRequest, PhysicalSlot newSlot) { return new PendingRequestSlotMatching(pendingRequest, newSlot); } public void fulfillPendingRequest() { Preconditions.checkState( pendingRequest.fulfill(matchedSlot), "Pending requests must be fulfillable."); } } }
Should this path be more generic? A hardcoded home-directory path like this won't exist on other machines or in CI.
public void connectionAttributes() throws Exception { Map<String, String> attributes = new HashMap<String, String>(); attributes.put("_connector_name", "Apache Beam SingleStoreDB I/O"); attributes.put("_connector_version", ReleaseInfo.getReleaseInfo().getVersion()); attributes.put("_product_version", ReleaseInfo.getReleaseInfo().getVersion()); SingleStoreIO.DataSourceConfiguration dataSourceConfiguration = SingleStoreIO.DataSourceConfiguration.create(serverName + ":" + port) .withPassword(password) .withUsername(username); DataSource dataSource = dataSourceConfiguration.getDataSource(); try (Connection conn = dataSource.getConnection(); Statement stmt = conn.createStatement(); ResultSet rs = stmt.executeQuery("select * from information_schema.mv_connection_attributes"); ) { while (rs.next()) { String attribute = rs.getString(3); String value = rs.getString(4); if (attributes.containsKey(attribute)) { assertEquals(attributes.get(attribute), value); attributes.remove(attribute); } } } catch (Exception e) { File file = new File("/home/amakarovych-ua/Test/log"); PrintStream ps = new PrintStream(file); e.printStackTrace(ps); ps.close(); throw e; } assertTrue(attributes.isEmpty()); }
File file = new File("/home/amakarovych-ua/Test/log");
public void connectionAttributes() throws Exception { Map<String, String> attributes = new HashMap<String, String>(); attributes.put("_connector_name", "Apache Beam SingleStoreDB I/O"); attributes.put("_connector_version", ReleaseInfo.getReleaseInfo().getVersion()); attributes.put("_product_version", ReleaseInfo.getReleaseInfo().getVersion()); SingleStoreIO.DataSourceConfiguration dataSourceConfiguration = SingleStoreIO.DataSourceConfiguration.create(serverName + ":" + port) .withPassword(password) .withUsername(username); DataSource dataSource = dataSourceConfiguration.getDataSource(); try (Connection conn = dataSource.getConnection(); Statement stmt = conn.createStatement(); ResultSet rs = stmt.executeQuery("select * from information_schema.mv_connection_attributes"); ) { while (rs.next()) { String attribute = rs.getString(3); String value = rs.getString(4); if (attributes.containsKey(attribute)) { assertEquals(attributes.get(attribute), value); attributes.remove(attribute); } } } assertTrue(attributes.isEmpty()); }
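The fix above simply drops the debug dump. If a file dump were still wanted, a more generic option, sketched here as an assumption rather than the adopted fix, would be to replace the removed catch block with a JVM-managed temp file instead of a developer-specific absolute path (the test method already declares `throws Exception` and imports `File`/`PrintStream`):
```
} catch (Exception e) {
    // Hypothetical: write the stack trace to a temp file under java.io.tmpdir
    // rather than a hardcoded home-directory path.
    File file = File.createTempFile("singlestore-io-test", ".log");
    try (PrintStream ps = new PrintStream(file)) {
        e.printStackTrace(ps);
    }
    throw e;
}
```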
class SingleStoreIOConnectionAttributesIT { private static String serverName; private static String username; private static String password; private static Integer port; @BeforeClass public static void setup() throws Exception { SingleStoreIOTestPipelineOptions options; try { options = readIOTestPipelineOptions(SingleStoreIOTestPipelineOptions.class); } catch (IllegalArgumentException e) { options = null; } org.junit.Assume.assumeNotNull(options); serverName = options.getSingleStoreServerName(); username = options.getSingleStoreUsername(); password = options.getSingleStorePassword(); port = options.getSingleStorePort(); } @Test }
We are using the `targetType` in the switch case
private TypeTestResult checkBuiltInIntSubtypeWidenPossible(BType actualType, BType targetType) { int actualTag = getReferredType(actualType).tag; switch (targetType.tag) { case TypeTags.INT: if (actualTag == TypeTags.BYTE || TypeTags.isIntegerTypeTag(actualTag)) { return TypeTestResult.TRUE; } break; case TypeTags.SIGNED32_INT: if (actualTag == TypeTags.SIGNED16_INT || actualTag == TypeTags.SIGNED8_INT || actualTag == TypeTags.UNSIGNED16_INT || actualTag == TypeTags.UNSIGNED8_INT || actualTag == TypeTags.BYTE) { return TypeTestResult.TRUE; } break; case TypeTags.SIGNED16_INT: if (actualTag == TypeTags.SIGNED8_INT || actualTag == TypeTags.UNSIGNED8_INT || actualTag == TypeTags.BYTE) { return TypeTestResult.TRUE; } break; case TypeTags.UNSIGNED32_INT: if (actualTag == TypeTags.UNSIGNED16_INT || actualTag == TypeTags.UNSIGNED8_INT || actualTag == TypeTags.BYTE) { return TypeTestResult.TRUE; } break; case TypeTags.UNSIGNED16_INT: if (actualTag == TypeTags.UNSIGNED8_INT || actualTag == TypeTags.BYTE) { return TypeTestResult.TRUE; } break; case TypeTags.BYTE: if (actualTag == TypeTags.UNSIGNED8_INT) { return TypeTestResult.TRUE; } break; case TypeTags.UNSIGNED8_INT: if (actualTag == TypeTags.BYTE) { return TypeTestResult.TRUE; } break; case TypeTags.TYPEREFDESC: return checkBuiltInIntSubtypeWidenPossible(actualType, getReferredType(targetType)); } return TypeTestResult.NOT_FOUND; }
int actualTag = getReferredType(actualType).tag;
private TypeTestResult checkBuiltInIntSubtypeWidenPossible(BType actualType, BType targetType) { int actualTag = getReferredType(actualType).tag; switch (targetType.tag) { case TypeTags.INT: if (actualTag == TypeTags.BYTE || TypeTags.isIntegerTypeTag(actualTag)) { return TypeTestResult.TRUE; } break; case TypeTags.SIGNED32_INT: if (actualTag == TypeTags.SIGNED16_INT || actualTag == TypeTags.SIGNED8_INT || actualTag == TypeTags.UNSIGNED16_INT || actualTag == TypeTags.UNSIGNED8_INT || actualTag == TypeTags.BYTE) { return TypeTestResult.TRUE; } break; case TypeTags.SIGNED16_INT: if (actualTag == TypeTags.SIGNED8_INT || actualTag == TypeTags.UNSIGNED8_INT || actualTag == TypeTags.BYTE) { return TypeTestResult.TRUE; } break; case TypeTags.UNSIGNED32_INT: if (actualTag == TypeTags.UNSIGNED16_INT || actualTag == TypeTags.UNSIGNED8_INT || actualTag == TypeTags.BYTE) { return TypeTestResult.TRUE; } break; case TypeTags.UNSIGNED16_INT: if (actualTag == TypeTags.UNSIGNED8_INT || actualTag == TypeTags.BYTE) { return TypeTestResult.TRUE; } break; case TypeTags.BYTE: if (actualTag == TypeTags.UNSIGNED8_INT) { return TypeTestResult.TRUE; } break; case TypeTags.UNSIGNED8_INT: if (actualTag == TypeTags.BYTE) { return TypeTestResult.TRUE; } break; case TypeTags.TYPEREFDESC: return checkBuiltInIntSubtypeWidenPossible(actualType, getReferredType(targetType)); } return TypeTestResult.NOT_FOUND; }
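For reference, the widening relation that `checkBuiltInIntSubtypeWidenPossible` encodes with nested switch/if branches can be sketched as a small lookup table. This is an illustrative reconstruction, not the compiler's actual data structure: the tag names are simplified, the tri-state `TypeTestResult` is collapsed to a boolean, and the `TYPEREFDESC` unwrapping step is omitted. Each target maps to the set of source subtypes that widen to it:

```java
import java.util.Map;
import java.util.Set;

// Simplified sketch of the built-in int-subtype widening lattice.
public class IntSubtypeWidening {
    enum Tag { INT, SIGNED32, SIGNED16, SIGNED8, UNSIGNED32, UNSIGNED16, UNSIGNED8, BYTE }

    // For each target subtype, the source subtypes that implicitly widen to it,
    // mirroring the case arms of checkBuiltInIntSubtypeWidenPossible.
    static final Map<Tag, Set<Tag>> WIDENS_TO = Map.of(
        Tag.INT,        Set.of(Tag.SIGNED32, Tag.SIGNED16, Tag.SIGNED8,
                               Tag.UNSIGNED32, Tag.UNSIGNED16, Tag.UNSIGNED8, Tag.BYTE),
        Tag.SIGNED32,   Set.of(Tag.SIGNED16, Tag.SIGNED8, Tag.UNSIGNED16, Tag.UNSIGNED8, Tag.BYTE),
        Tag.SIGNED16,   Set.of(Tag.SIGNED8, Tag.UNSIGNED8, Tag.BYTE),
        Tag.UNSIGNED32, Set.of(Tag.UNSIGNED16, Tag.UNSIGNED8, Tag.BYTE),
        Tag.UNSIGNED16, Set.of(Tag.UNSIGNED8, Tag.BYTE),
        Tag.BYTE,       Set.of(Tag.UNSIGNED8),
        Tag.UNSIGNED8,  Set.of(Tag.BYTE)
    );

    // A type trivially "widens" to itself; otherwise consult the table.
    static boolean widenPossible(Tag source, Tag target) {
        return source == target || WIDENS_TO.getOrDefault(target, Set.of()).contains(source);
    }
}
```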
class Types { private static final CompilerContext.Key<Types> TYPES_KEY = new CompilerContext.Key<>(); private final Unifier unifier; private SymbolTable symTable; private SymbolResolver symResolver; private BLangDiagnosticLog dlog; private Names names; private int finiteTypeCount = 0; private BUnionType expandedXMLBuiltinSubtypes; private final BLangAnonymousModelHelper anonymousModelHelper; private int recordCount = 0; private SymbolEnv env; public static Types getInstance(CompilerContext context) { Types types = context.get(TYPES_KEY); if (types == null) { types = new Types(context); } return types; } public Types(CompilerContext context) { context.put(TYPES_KEY, this); this.symTable = SymbolTable.getInstance(context); this.symResolver = SymbolResolver.getInstance(context); this.dlog = BLangDiagnosticLog.getInstance(context); this.names = Names.getInstance(context); this.expandedXMLBuiltinSubtypes = BUnionType.create(null, symTable.xmlElementType, symTable.xmlCommentType, symTable.xmlPIType, symTable.xmlTextType); this.unifier = new Unifier(); this.anonymousModelHelper = BLangAnonymousModelHelper.getInstance(context); } public List<BType> checkTypes(BLangExpression node, List<BType> actualTypes, List<BType> expTypes) { List<BType> resTypes = new ArrayList<>(); for (int i = 0; i < actualTypes.size(); i++) { resTypes.add(checkType(node, actualTypes.get(i), expTypes.size() > i ? expTypes.get(i) : symTable.noType)); } return resTypes; } public BType checkType(BLangExpression node, BType actualType, BType expType) { return checkType(node, actualType, expType, DiagnosticErrorCode.INCOMPATIBLE_TYPES); } public BType checkType(BLangExpression expr, BType actualType, BType expType, DiagnosticCode diagCode) { expr.setDeterminedType(actualType); expr.setTypeCheckedType(checkType(expr.pos, actualType, expType, diagCode)); if (expr.getBType().tag == TypeTags.SEMANTIC_ERROR) { return expr.getBType(); } setImplicitCastExpr(expr, actualType, expType); return expr.getBType(); } public BType checkType(Location pos, BType actualType, BType expType, DiagnosticCode diagCode) { if (expType.tag == TypeTags.SEMANTIC_ERROR) { return expType; } else if (expType.tag == TypeTags.NONE) { return actualType; } else if (actualType.tag == TypeTags.SEMANTIC_ERROR) { return actualType; } else if (isAssignable(actualType, expType)) { return actualType; } dlog.error(pos, diagCode, expType, actualType); return symTable.semanticError; } public boolean isLax(BType type) { Set<BType> visited = new HashSet<>(); int result = isLaxType(type, visited); if (result == 1) { return true; } return false; } public int isLaxType(BType type, Set<BType> visited) { if (!visited.add(type)) { return -1; } switch (type.tag) { case TypeTags.JSON: case TypeTags.XML: case TypeTags.XML_ELEMENT: return 1; case TypeTags.MAP: return isLaxType(((BMapType) type).constraint, visited); case TypeTags.UNION: if (isSameType(type, symTable.jsonType)) { visited.add(type); return 1; } boolean atleastOneLaxType = false; for (BType member : ((BUnionType) type).getMemberTypes()) { int result = isLaxType(member, visited); if (result == -1) { continue; } if (result == 0) { return 0; } atleastOneLaxType = true; } return atleastOneLaxType ? 
1 : 0; } return 0; } public boolean isLaxType(BType type, Map<BType, Boolean> visited) { if (visited.containsKey(type)) { return visited.get(type); } switch (type.tag) { case TypeTags.JSON: case TypeTags.XML: case TypeTags.XML_ELEMENT: visited.put(type, true); return true; case TypeTags.MAP: boolean result = isLaxType(((BMapType) type).constraint, visited); visited.put(type, result); return result; case TypeTags.UNION: if (type == symTable.jsonType || isSameType(type, symTable.jsonType)) { visited.put(type, true); return true; } for (BType member : ((BUnionType) type).getMemberTypes()) { if (!isLaxType(member, visited)) { visited.put(type, false); return false; } } visited.put(type, true); return true; case TypeTags.TYPEREFDESC: return isLaxType(getReferredType(type), visited); } visited.put(type, false); return false; } public boolean isSameType(BType source, BType target) { return isSameType(source, target, new HashSet<>()); } public boolean isSameOrderedType(BType source, BType target) { return isSameOrderedType(source, target, new HashSet<>()); } private boolean isSameOrderedType(BType source, BType target, Set<TypePair> unresolvedTypes) { if (!unresolvedTypes.add(new TypePair(source, target))) { return true; } BTypeVisitor<BType, Boolean> orderedTypeVisitor = new BOrderedTypeVisitor(unresolvedTypes); return target.accept(orderedTypeVisitor, source); } public boolean isPureType(BType type) { IsPureTypeUniqueVisitor visitor = new IsPureTypeUniqueVisitor(); return visitor.visit(type); } public boolean isAnydata(BType type) { IsAnydataUniqueVisitor visitor = new IsAnydataUniqueVisitor(); return visitor.visit(type); } private boolean isSameType(BType source, BType target, Set<TypePair> unresolvedTypes) { TypePair pair = new TypePair(source, target); if (unresolvedTypes.contains(pair)) { return true; } unresolvedTypes.add(pair); BTypeVisitor<BType, Boolean> sameTypeVisitor = new BSameTypeVisitor(unresolvedTypes); return target.accept(sameTypeVisitor, source); } public boolean isValueType(BType type) { switch (type.tag) { case TypeTags.BOOLEAN: case TypeTags.BYTE: case TypeTags.DECIMAL: case TypeTags.FLOAT: case TypeTags.INT: case TypeTags.STRING: case TypeTags.SIGNED32_INT: case TypeTags.SIGNED16_INT: case TypeTags.SIGNED8_INT: case TypeTags.UNSIGNED32_INT: case TypeTags.UNSIGNED16_INT: case TypeTags.UNSIGNED8_INT: case TypeTags.CHAR_STRING: return true; case TypeTags.TYPEREFDESC: return isValueType(getReferredType(type)); default: return false; } } boolean isBasicNumericType(BType bType) { BType type = getReferredType(bType); return type.tag < TypeTags.STRING || TypeTags.isIntegerTypeTag(type.tag); } boolean finiteTypeContainsNumericTypeValues(BFiniteType finiteType) { return finiteType.getValueSpace().stream().anyMatch(valueExpr -> isBasicNumericType(valueExpr.getBType())); } public boolean containsErrorType(BType type) { if (getReferredType(type).tag == TypeTags.UNION) { return ((BUnionType) getReferredType(type)).getMemberTypes().stream() .anyMatch(this::containsErrorType); } if (getReferredType(type).tag == TypeTags.READONLY) { return true; } return getReferredType(type).tag == TypeTags.ERROR; } public boolean isSubTypeOfList(BType type) { if (type.tag != TypeTags.UNION) { return isSubTypeOfBaseType(type, TypeTags.ARRAY) || isSubTypeOfBaseType(type, TypeTags.TUPLE); } return ((BUnionType) type).getMemberTypes().stream().allMatch(this::isSubTypeOfList); } BType resolvePatternTypeFromMatchExpr(BLangErrorBindingPattern errorBindingPattern, BLangExpression matchExpr, SymbolEnv env) { if 
(matchExpr == null) { return errorBindingPattern.getBType(); } BType intersectionType = getTypeIntersection( IntersectionContext.compilerInternalIntersectionContext(), matchExpr.getBType(), errorBindingPattern.getBType(), env); if (intersectionType == symTable.semanticError) { return symTable.noType; } return intersectionType; } public BType resolvePatternTypeFromMatchExpr(BLangListBindingPattern listBindingPattern, BLangVarBindingPatternMatchPattern varBindingPatternMatchPattern, SymbolEnv env) { BTupleType listBindingPatternType = (BTupleType) listBindingPattern.getBType(); if (varBindingPatternMatchPattern.matchExpr == null) { return listBindingPatternType; } BType matchExprType = varBindingPatternMatchPattern.matchExpr.getBType(); BType intersectionType = getTypeIntersection( IntersectionContext.compilerInternalIntersectionContext(), matchExprType, listBindingPatternType, env); if (intersectionType != symTable.semanticError) { return intersectionType; } return symTable.noType; } public BType resolvePatternTypeFromMatchExpr(BLangListMatchPattern listMatchPattern, BTupleType listMatchPatternType, SymbolEnv env) { if (listMatchPattern.matchExpr == null) { return listMatchPatternType; } BType matchExprType = listMatchPattern.matchExpr.getBType(); BType intersectionType = getTypeIntersection( IntersectionContext.compilerInternalIntersectionContext(), matchExprType, listMatchPatternType, env); if (intersectionType != symTable.semanticError) { return intersectionType; } return symTable.noType; } BType resolvePatternTypeFromMatchExpr(BLangErrorMatchPattern errorMatchPattern, BLangExpression matchExpr) { if (matchExpr == null) { return errorMatchPattern.getBType(); } BType matchExprType = matchExpr.getBType(); BType patternType = errorMatchPattern.getBType(); if (isAssignable(matchExprType, patternType)) { return matchExprType; } if (isAssignable(patternType, matchExprType)) { return patternType; } return symTable.noType; } BType resolvePatternTypeFromMatchExpr(BLangConstPattern constPattern, BLangExpression constPatternExpr) { if (constPattern.matchExpr == null) { if (constPatternExpr.getKind() == NodeKind.SIMPLE_VARIABLE_REF) { return ((BLangSimpleVarRef) constPatternExpr).symbol.type; } else { return constPatternExpr.getBType(); } } BType matchExprType = constPattern.matchExpr.getBType(); BType constMatchPatternExprType = constPatternExpr.getBType(); if (constPatternExpr.getKind() == NodeKind.SIMPLE_VARIABLE_REF) { BLangSimpleVarRef constVarRef = (BLangSimpleVarRef) constPatternExpr; BType constVarRefSymbolType = constVarRef.symbol.type; if (isAssignable(constVarRefSymbolType, matchExprType)) { return constVarRefSymbolType; } return symTable.noType; } BLangLiteral constPatternLiteral = (BLangLiteral) constPatternExpr; if (containsAnyType(constMatchPatternExprType)) { return matchExprType; } else if (containsAnyType(matchExprType)) { return constMatchPatternExprType; } if (getReferredType(matchExprType).tag == TypeTags.BYTE && getReferredType(constMatchPatternExprType).tag == TypeTags.INT) { return matchExprType; } if (isAssignable(constMatchPatternExprType, matchExprType)) { return constMatchPatternExprType; } if (getReferredType(matchExprType).tag == TypeTags.UNION) { for (BType memberType : ((BUnionType) getReferredType(matchExprType)).getMemberTypes()) { if (getReferredType(memberType).tag == TypeTags.FINITE) { if (isAssignableToFiniteType(memberType, constPatternLiteral)) { return memberType; } } else { if (isAssignable(constMatchPatternExprType, matchExprType)) { return 
constMatchPatternExprType; } } } } else if (getReferredType(matchExprType).tag == TypeTags.FINITE) { if (isAssignableToFiniteType(matchExprType, constPatternLiteral)) { return matchExprType; } } return symTable.noType; } BType resolvePatternTypeFromMatchExpr(BLangMappingMatchPattern mappingMatchPattern, BType patternType, SymbolEnv env) { if (mappingMatchPattern.matchExpr == null) { return patternType; } BType intersectionType = getTypeIntersection( IntersectionContext.compilerInternalIntersectionContext(), mappingMatchPattern.matchExpr.getBType(), patternType, env); if (intersectionType == symTable.semanticError) { return symTable.noType; } return intersectionType; } public BType resolvePatternTypeFromMatchExpr(BLangMappingBindingPattern mappingBindingPattern, BLangVarBindingPatternMatchPattern varBindingPatternMatchPattern, SymbolEnv env) { BRecordType mappingBindingPatternType = (BRecordType) mappingBindingPattern.getBType(); if (varBindingPatternMatchPattern.matchExpr == null) { return mappingBindingPatternType; } BType intersectionType = getTypeIntersection( IntersectionContext.compilerInternalIntersectionContext(), varBindingPatternMatchPattern.matchExpr.getBType(), mappingBindingPatternType, env); if (intersectionType == symTable.semanticError) { return symTable.noType; } return intersectionType; } private boolean containsAnyType(BType type) { if (type.tag != TypeTags.UNION) { return type.tag == TypeTags.ANY; } for (BType memberTypes : ((BUnionType) type).getMemberTypes()) { if (memberTypes.tag == TypeTags.ANY) { return true; } } return false; } private boolean containsAnyDataType(BType type) { if (type.tag != TypeTags.UNION) { return type.tag == TypeTags.ANYDATA; } for (BType memberTypes : ((BUnionType) type).getMemberTypes()) { if (memberTypes.tag == TypeTags.ANYDATA) { return true; } } return false; } BType mergeTypes(BType typeFirst, BType typeSecond) { if (containsAnyType(typeFirst) && !containsErrorType(typeSecond)) { return typeSecond; } if (containsAnyType(typeSecond) && !containsErrorType(typeFirst)) { return typeFirst; } if (containsAnyDataType(typeFirst) && !containsErrorType(typeSecond)) { return typeSecond; } if (containsAnyDataType(typeSecond) && !containsErrorType(typeFirst)) { return typeFirst; } if (isSameBasicType(typeFirst, typeSecond)) { return typeFirst; } return BUnionType.create(null, typeFirst, typeSecond); } public boolean isSubTypeOfMapping(BType bType) { BType type = getReferredType(bType); if (type.tag == TypeTags.INTERSECTION) { return isSubTypeOfMapping(((BIntersectionType) type).effectiveType); } if (type.tag != TypeTags.UNION) { return isSubTypeOfBaseType(type, TypeTags.MAP) || isSubTypeOfBaseType(type, TypeTags.RECORD); } return ((BUnionType) type).getMemberTypes().stream().allMatch(this::isSubTypeOfMapping); } public boolean isSubTypeOfBaseType(BType bType, int baseTypeTag) { BType type = getReferredType(bType); if (type.tag != TypeTags.UNION) { if ((TypeTags.isIntegerTypeTag(type.tag) || type.tag == TypeTags.BYTE) && TypeTags.INT == baseTypeTag) { return true; } if (TypeTags.isStringTypeTag(type.tag) && TypeTags.STRING == baseTypeTag) { return true; } return type.tag == baseTypeTag || (baseTypeTag == TypeTags.TUPLE && type.tag == TypeTags.ARRAY) || (baseTypeTag == TypeTags.ARRAY && type.tag == TypeTags.TUPLE); } if (TypeTags.isXMLTypeTag(baseTypeTag)) { return true; } return isUnionMemberTypesSubTypeOfBaseType(((BUnionType) type).getMemberTypes(), baseTypeTag); } private boolean isUnionMemberTypesSubTypeOfBaseType(LinkedHashSet<BType> memberTypes, 
int baseTypeTag) { for (BType type : memberTypes) { if (!isSubTypeOfBaseType(type, baseTypeTag)) { return false; } } return true; } /** * Checks whether source type is assignable to the target type. * <p> * Source type is assignable to the target type if, * 1) the target type is any and the source type is not a value type. * 2) there exists an implicit cast symbol from source to target. * 3) both types are JSON and the target constraint is no type. * 4) both types are array type and both array types are assignable. * 5) both types are MAP and the target constraint is any type or constraints are structurally equivalent. * * @param source type. * @param target type. * @return true if source type is assignable to the target type. */ public boolean isAssignable(BType source, BType target) { return isAssignable(source, target, new HashSet<>()); } private boolean isAssignable(BType source, BType target, Set<TypePair> unresolvedTypes) { if (isSameType(source, target)) { return true; } int sourceTag = source.tag; int targetTag = target.tag; if (isNeverTypeOrStructureTypeWithARequiredNeverMember(source)) { return true; } if (sourceTag == TypeTags.TYPEREFDESC || targetTag == TypeTags.TYPEREFDESC) { return isAssignable(getReferredType(source), getReferredType(target), unresolvedTypes); } if (!Symbols.isFlagOn(source.flags, Flags.PARAMETERIZED) && !isInherentlyImmutableType(target) && Symbols.isFlagOn(target.flags, Flags.READONLY) && !isInherentlyImmutableType(source) && isMutable(source)) { return false; } if (sourceTag == TypeTags.INTERSECTION) { return isAssignable(((BIntersectionType) source).effectiveType, targetTag != TypeTags.INTERSECTION ? target : ((BIntersectionType) target).effectiveType, unresolvedTypes); } if (targetTag == TypeTags.INTERSECTION) { return isAssignable(source, ((BIntersectionType) target).effectiveType, unresolvedTypes); } if (sourceTag == TypeTags.PARAMETERIZED_TYPE) { return isParameterizedTypeAssignable(source, target, unresolvedTypes); } if (sourceTag == TypeTags.BYTE && targetTag == TypeTags.INT) { return true; } if (TypeTags.isXMLTypeTag(sourceTag) && TypeTags.isXMLTypeTag(targetTag)) { return isXMLTypeAssignable(source, target, unresolvedTypes); } if (sourceTag == TypeTags.CHAR_STRING && targetTag == TypeTags.STRING) { return true; } if (sourceTag == TypeTags.ERROR && targetTag == TypeTags.ERROR) { return isErrorTypeAssignable((BErrorType) source, (BErrorType) target, unresolvedTypes); } else if (sourceTag == TypeTags.ERROR && targetTag == TypeTags.ANY) { return false; } if (sourceTag == TypeTags.NIL && (isNullable(target) || targetTag == TypeTags.JSON)) { return true; } if (targetTag == TypeTags.ANY && !containsErrorType(source) && !isValueType(source)) { return true; } if (targetTag == TypeTags.ANYDATA && !containsErrorType(source) && isAnydata(source)) { return true; } if (targetTag == TypeTags.READONLY) { if ((isInherentlyImmutableType(source) || Symbols.isFlagOn(source.flags, Flags.READONLY))) { return true; } if (isAssignable(source, symTable.anyAndReadonlyOrError, unresolvedTypes)) { return true; } } if (sourceTag == TypeTags.READONLY && isAssignable(symTable.anyAndReadonlyOrError, target, unresolvedTypes)) { return true; } if (targetTag == TypeTags.MAP && sourceTag == TypeTags.RECORD) { BRecordType recordType = (BRecordType) source; return isAssignableRecordType(recordType, target, unresolvedTypes); } if (targetTag == TypeTags.RECORD && sourceTag == TypeTags.MAP) { return isAssignableMapType((BMapType) source, (BRecordType) target); } if (targetTag == 
TypeTags.TYPEDESC && sourceTag == TypeTags.TYPEDESC) { return isAssignable(((BTypedescType) source).constraint, (((BTypedescType) target).constraint), unresolvedTypes); } if (targetTag == TypeTags.TABLE && sourceTag == TypeTags.TABLE) { return isAssignableTableType((BTableType) source, (BTableType) target, unresolvedTypes); } if (targetTag == TypeTags.STREAM && sourceTag == TypeTags.STREAM) { return isAssignableStreamType((BStreamType) source, (BStreamType) target, unresolvedTypes); } if (isBuiltInTypeWidenPossible(source, target) == TypeTestResult.TRUE) { return true; } if (sourceTag == TypeTags.FINITE) { return isFiniteTypeAssignable((BFiniteType) source, target, unresolvedTypes); } if ((targetTag == TypeTags.UNION || sourceTag == TypeTags.UNION) && isAssignableToUnionType(source, target, unresolvedTypes)) { return true; } if (targetTag == TypeTags.JSON) { if (sourceTag == TypeTags.JSON) { return true; } if (sourceTag == TypeTags.TUPLE) { return isTupleTypeAssignable(source, target, unresolvedTypes); } if (sourceTag == TypeTags.ARRAY) { return isArrayTypesAssignable((BArrayType) source, target, unresolvedTypes); } if (sourceTag == TypeTags.MAP) { return isAssignable(((BMapType) source).constraint, target, unresolvedTypes); } if (sourceTag == TypeTags.RECORD) { return isAssignableRecordType((BRecordType) source, target, unresolvedTypes); } } if (targetTag == TypeTags.FUTURE && sourceTag == TypeTags.FUTURE) { if (((BFutureType) target).constraint.tag == TypeTags.NONE) { return true; } return isAssignable(((BFutureType) source).constraint, ((BFutureType) target).constraint, unresolvedTypes); } if (targetTag == TypeTags.MAP && sourceTag == TypeTags.MAP) { if (((BMapType) target).constraint.tag == TypeTags.ANY && ((BMapType) source).constraint.tag != TypeTags.UNION) { return true; } return isAssignable(((BMapType) source).constraint, ((BMapType) target).constraint, unresolvedTypes); } if ((sourceTag == TypeTags.OBJECT || sourceTag == TypeTags.RECORD) && (targetTag == TypeTags.OBJECT || targetTag == TypeTags.RECORD)) { return checkStructEquivalency(source, target, unresolvedTypes); } if (sourceTag == TypeTags.TUPLE && targetTag == TypeTags.ARRAY) { return isTupleTypeAssignableToArrayType((BTupleType) source, (BArrayType) target, unresolvedTypes); } if (sourceTag == TypeTags.ARRAY && targetTag == TypeTags.TUPLE) { return isArrayTypeAssignableToTupleType((BArrayType) source, (BTupleType) target, unresolvedTypes); } if (sourceTag == TypeTags.TUPLE || targetTag == TypeTags.TUPLE) { return isTupleTypeAssignable(source, target, unresolvedTypes); } if (sourceTag == TypeTags.INVOKABLE && targetTag == TypeTags.INVOKABLE) { return isFunctionTypeAssignable((BInvokableType) source, (BInvokableType) target, new HashSet<>()); } return sourceTag == TypeTags.ARRAY && targetTag == TypeTags.ARRAY && isArrayTypesAssignable((BArrayType) source, target, unresolvedTypes); } private boolean isMutable(BType type) { if (Symbols.isFlagOn(type.flags, Flags.READONLY)) { return false; } if (type.tag != TypeTags.UNION) { return true; } BUnionType unionType = (BUnionType) type; for (BType memberType : unionType.getMemberTypes()) { if (!Symbols.isFlagOn(memberType.flags, Flags.READONLY)) { return true; } } unionType.flags |= Flags.READONLY; BTypeSymbol tsymbol = unionType.tsymbol; if (tsymbol != null) { tsymbol.flags |= Flags.READONLY; } return false; } private boolean isParameterizedTypeAssignable(BType source, BType target, Set<TypePair> unresolvedTypes) { BType resolvedSourceType = unifier.build(source); if (target.tag != 
TypeTags.PARAMETERIZED_TYPE) { return isAssignable(resolvedSourceType, target, unresolvedTypes); } if (((BParameterizedType) source).paramIndex != ((BParameterizedType) target).paramIndex) { return false; } return isAssignable(resolvedSourceType, unifier.build(target), unresolvedTypes); } private boolean isAssignableRecordType(BRecordType recordType, BType type, Set<TypePair> unresolvedTypes) { TypePair pair = new TypePair(recordType, type); if (!unresolvedTypes.add(pair)) { return true; } BType targetType; switch (type.tag) { case TypeTags.MAP: targetType = ((BMapType) type).constraint; break; case TypeTags.JSON: targetType = type; break; default: throw new IllegalArgumentException("Incompatible target type: " + type.toString()); } return recordFieldsAssignableToType(recordType, targetType, unresolvedTypes); } private boolean isAssignableStreamType(BStreamType sourceStreamType, BStreamType targetStreamType, Set<TypePair> unresolvedTypes) { return isAssignable(sourceStreamType.constraint, targetStreamType.constraint, unresolvedTypes) && isAssignable(sourceStreamType.completionType, targetStreamType.completionType, unresolvedTypes); } private boolean recordFieldsAssignableToType(BRecordType recordType, BType targetType, Set<TypePair> unresolvedTypes) { for (BField field : recordType.fields.values()) { if (!isAssignable(field.type, targetType, unresolvedTypes)) { return false; } } if (!recordType.sealed) { return isAssignable(recordType.restFieldType, targetType, unresolvedTypes); } return true; } private boolean isAssignableTableType(BTableType sourceTableType, BTableType targetTableType, Set<TypePair> unresolvedTypes) { if (!isAssignable(sourceTableType.constraint, targetTableType.constraint, unresolvedTypes)) { return false; } if (targetTableType.keyTypeConstraint == null && targetTableType.fieldNameList == null) { return true; } if (targetTableType.keyTypeConstraint != null) { if (sourceTableType.keyTypeConstraint != null && (isAssignable(sourceTableType.keyTypeConstraint, targetTableType.keyTypeConstraint, unresolvedTypes))) { return true; } if (sourceTableType.fieldNameList == null) { return false; } List<BType> fieldTypes = new ArrayList<>(); sourceTableType.fieldNameList.stream() .map(f -> getTableConstraintField(sourceTableType.constraint, f)) .filter(Objects::nonNull).map(f -> f.type).forEach(fieldTypes::add); if (fieldTypes.size() == 1) { return isAssignable(fieldTypes.get(0), targetTableType.keyTypeConstraint, unresolvedTypes); } BTupleType tupleType = new BTupleType(fieldTypes); return isAssignable(tupleType, targetTableType.keyTypeConstraint, unresolvedTypes); } return targetTableType.fieldNameList.equals(sourceTableType.fieldNameList); } BField getTableConstraintField(BType constraintType, String fieldName) { switch (constraintType.tag) { case TypeTags.RECORD: Map<String, BField> fieldList = ((BRecordType) constraintType).getFields(); return fieldList.get(fieldName); case TypeTags.UNION: BUnionType unionType = (BUnionType) constraintType; Set<BType> memTypes = unionType.getMemberTypes(); List<BField> fields = memTypes.stream().map(type -> getTableConstraintField(type, fieldName)) .filter(Objects::nonNull).collect(Collectors.toList()); if (fields.size() != memTypes.size()) { return null; } if (fields.stream().allMatch(field -> isAssignable(field.type, fields.get(0).type) && isAssignable(fields.get(0).type, field.type))) { return fields.get(0); } break; case TypeTags.INTERSECTION: return getTableConstraintField(((BIntersectionType) constraintType).effectiveType, fieldName); case 
TypeTags.TYPEREFDESC: return getTableConstraintField(((BTypeReferenceType) constraintType).referredType, fieldName); } return null; } private boolean isAssignableMapType(BMapType sourceMapType, BRecordType targetRecType) { if (targetRecType.sealed) { return false; } for (BField field : targetRecType.fields.values()) { if (!Symbols.isFlagOn(field.symbol.flags, Flags.OPTIONAL)) { return false; } if (hasIncompatibleReadOnlyFlags(field.symbol.flags, sourceMapType.flags)) { return false; } if (!isAssignable(sourceMapType.constraint, field.type)) { return false; } } return isAssignable(sourceMapType.constraint, targetRecType.restFieldType); } private boolean hasIncompatibleReadOnlyFlags(long targetFlags, long sourceFlags) { return Symbols.isFlagOn(targetFlags, Flags.READONLY) && !Symbols.isFlagOn(sourceFlags, Flags.READONLY); } private boolean isErrorTypeAssignable(BErrorType source, BErrorType target, Set<TypePair> unresolvedTypes) { if (target == symTable.errorType) { return true; } TypePair pair = new TypePair(source, target); if (unresolvedTypes.contains(pair)) { return true; } unresolvedTypes.add(pair); return isAssignable(source.detailType, target.detailType, unresolvedTypes) && target.typeIdSet.isAssignableFrom(source.typeIdSet); } private boolean isXMLTypeAssignable(BType sourceT, BType targetT, Set<TypePair> unresolvedTypes) { BType sourceType = getReferredType(sourceT); BType targetType = getReferredType(targetT); int sourceTag = sourceType.tag; int targetTag = targetType.tag; if (targetTag == TypeTags.XML) { BXMLType target = (BXMLType) targetType; if (target.constraint != null) { if (TypeTags.isXMLNonSequenceType(sourceTag)) { return isAssignable(sourceType, target.constraint, unresolvedTypes); } BXMLType source = (BXMLType) sourceType; if (source.constraint.tag == TypeTags.NEVER) { if (sourceTag == targetTag) { return true; } return isAssignable(source, target.constraint, unresolvedTypes); } return isAssignable(source.constraint, target, unresolvedTypes); } return true; } if (sourceTag == TypeTags.XML) { BXMLType source = (BXMLType) sourceType; if (targetTag == TypeTags.XML_TEXT) { if (source.constraint != null) { if (source.constraint.tag == TypeTags.NEVER || source.constraint.tag == TypeTags.XML_TEXT) { return true; } else { return isAssignable(source.constraint, targetType, unresolvedTypes); } } return false; } } return sourceTag == targetTag; } private boolean isTupleTypeAssignable(BType source, BType target, Set<TypePair> unresolvedTypes) { TypePair pair = new TypePair(source, target); if (unresolvedTypes.contains(pair)) { return true; } if (source.tag == TypeTags.TUPLE && ((BTupleType) source).isCyclic) { unresolvedTypes.add(pair); } if (target.tag == TypeTags.JSON && source.tag == TypeTags.TUPLE) { BTupleType rhsTupleType = (BTupleType) source; for (BType tupleType : rhsTupleType.tupleTypes) { if (!isAssignable(tupleType, target, unresolvedTypes)) { return false; } } if (rhsTupleType.restType != null) { return isAssignable(rhsTupleType.restType, target, unresolvedTypes); } return true; } if (source.tag != TypeTags.TUPLE || target.tag != TypeTags.TUPLE) { return false; } BTupleType lhsTupleType = (BTupleType) target; BTupleType rhsTupleType = (BTupleType) source; if (lhsTupleType.restType == null && rhsTupleType.restType != null) { return false; } if (lhsTupleType.restType == null && lhsTupleType.tupleTypes.size() != rhsTupleType.tupleTypes.size()) { return false; } if (lhsTupleType.restType != null && rhsTupleType.restType != null) { if (!isAssignable(rhsTupleType.restType, 
lhsTupleType.restType, unresolvedTypes)) { return false; } } if (lhsTupleType.tupleTypes.size() > rhsTupleType.tupleTypes.size()) { return false; } for (int i = 0; i < rhsTupleType.tupleTypes.size(); i++) { BType lhsType = (lhsTupleType.tupleTypes.size() > i) ? lhsTupleType.tupleTypes.get(i) : lhsTupleType.restType; if (!isAssignable(rhsTupleType.tupleTypes.get(i), lhsType, unresolvedTypes)) { return false; } } return true; } private boolean checkAllTupleMembersBelongNoType(List<BType> tupleTypes) { boolean isNoType = false; for (BType memberType : tupleTypes) { switch (memberType.tag) { case TypeTags.NONE: isNoType = true; break; case TypeTags.TUPLE: isNoType = checkAllTupleMembersBelongNoType(((BTupleType) memberType).tupleTypes); if (!isNoType) { return false; } break; default: return false; } } return isNoType; } private boolean isTupleTypeAssignableToArrayType(BTupleType source, BArrayType target, Set<TypePair> unresolvedTypes) { if (target.state != BArrayState.OPEN && (source.restType != null || source.tupleTypes.size() != target.size)) { return false; } List<BType> sourceTypes = new ArrayList<>(source.tupleTypes); if (source.restType != null) { sourceTypes.add(source.restType); } return sourceTypes.stream() .allMatch(tupleElemType -> isAssignable(tupleElemType, target.eType, unresolvedTypes)); } private boolean isArrayTypeAssignableToTupleType(BArrayType source, BTupleType target, Set<TypePair> unresolvedTypes) { BType restType = target.restType; List<BType> tupleTypes = target.tupleTypes; if (source.state == BArrayState.OPEN) { if (restType == null || !tupleTypes.isEmpty()) { return false; } return isAssignable(source.eType, restType, unresolvedTypes); } int targetTupleMemberSize = tupleTypes.size(); int sourceArraySize = source.size; if (targetTupleMemberSize > sourceArraySize) { return false; } if (restType == null && targetTupleMemberSize < sourceArraySize) { return false; } BType sourceElementType = source.eType; for (BType memType : tupleTypes) { if (!isAssignable(sourceElementType, memType, unresolvedTypes)) { return false; } } if (restType == null) { return true; } return sourceArraySize == targetTupleMemberSize || isAssignable(sourceElementType, restType, unresolvedTypes); } private boolean isArrayTypesAssignable(BArrayType source, BType target, Set<TypePair> unresolvedTypes) { BType sourceElementType = source.getElementType(); if (target.tag == TypeTags.ARRAY) { BArrayType targetArrayType = (BArrayType) target; BType targetElementType = targetArrayType.getElementType(); if (targetArrayType.state == BArrayState.OPEN) { return isAssignable(sourceElementType, targetElementType, unresolvedTypes); } if (targetArrayType.size != source.size) { return false; } return isAssignable(sourceElementType, targetElementType, unresolvedTypes); } else if (target.tag == TypeTags.JSON) { return isAssignable(sourceElementType, target, unresolvedTypes); } else if (target.tag == TypeTags.ANYDATA) { return isAssignable(sourceElementType, target, unresolvedTypes); } return false; } private boolean isFunctionTypeAssignable(BInvokableType source, BInvokableType target, Set<TypePair> unresolvedTypes) { if (hasIncompatibleIsolatedFlags(source, target) || hasIncompatibleTransactionalFlags(source, target)) { return false; } if (Symbols.isFlagOn(target.flags, Flags.ANY_FUNCTION)) { return true; } if (containsTypeParams(target)) { if (source.paramTypes.size() != target.paramTypes.size()) { return false; } for (int i = 0; i < source.paramTypes.size(); i++) { BType sourceParam = source.paramTypes.get(i); 
BType targetParam = target.paramTypes.get(i); boolean isTypeParam = TypeParamAnalyzer.isTypeParam(targetParam); if (isTypeParam) { if (!isAssignable(sourceParam, targetParam)) { return false; } } else { if (!isAssignable(targetParam, sourceParam)) { return false; } } } if (source.retType == null && target.retType == null) { return true; } else if (source.retType == null || target.retType == null) { return false; } return isAssignable(source.retType, target.retType, unresolvedTypes); } return checkFunctionTypeEquality(source, target, unresolvedTypes, (s, t, ut) -> isAssignable(t, s, ut)); } public boolean isInherentlyImmutableType(BType type) { if (isValueType(type)) { return true; } switch (type.tag) { case TypeTags.XML_TEXT: case TypeTags.FINITE: case TypeTags.READONLY: case TypeTags.NIL: case TypeTags.ERROR: case TypeTags.INVOKABLE: case TypeTags.TYPEDESC: case TypeTags.HANDLE: return true; case TypeTags.XML: return ((BXMLType) type).constraint.tag == TypeTags.NEVER; case TypeTags.TYPEREFDESC: return isInherentlyImmutableType(((BTypeReferenceType) type).referredType); } return false; } public BType getReferredType(BType type) { BType constraint = type; if (type.tag == TypeTags.TYPEREFDESC) { constraint = ((BTypeReferenceType) type).referredType; } return constraint.tag == TypeTags.TYPEREFDESC ? getReferredType(constraint) : constraint; } public BType getReferredForAnonType(BType type) { if (anonymousModelHelper.isAnonymousType(type.tsymbol)) { return getReferredType(type); } return type; } boolean isSelectivelyImmutableType(BType type) { return isSelectivelyImmutableType(type, new HashSet<>(), false); } boolean isSelectivelyImmutableType(BType type, boolean forceCheck) { return isSelectivelyImmutableType(type, new HashSet<>(), forceCheck); } public boolean isSelectivelyImmutableType(BType type, Set<BType> unresolvedTypes) { return isSelectivelyImmutableType(type, unresolvedTypes, false); } private boolean isSelectivelyImmutableType(BType type, Set<BType> unresolvedTypes, boolean forceCheck) { return isSelectivelyImmutableType(type, false, unresolvedTypes, forceCheck); } private boolean isSelectivelyImmutableType(BType input, boolean disallowReadOnlyObjects, Set<BType> unresolvedTypes, boolean forceCheck) { BType type = getReferredType(input); if (isInherentlyImmutableType(type) || !(type instanceof SelectivelyImmutableReferenceType)) { return false; } if (!unresolvedTypes.add(type)) { return true; } if (!forceCheck && ((SelectivelyImmutableReferenceType) type).getImmutableType() != null) { return true; } switch (type.tag) { case TypeTags.ANY: case TypeTags.ANYDATA: case TypeTags.JSON: case TypeTags.XML: case TypeTags.XML_COMMENT: case TypeTags.XML_ELEMENT: case TypeTags.XML_PI: return true; case TypeTags.ARRAY: BType elementType = ((BArrayType) type).eType; return isInherentlyImmutableType(elementType) || isSelectivelyImmutableType(elementType, unresolvedTypes, forceCheck); case TypeTags.TUPLE: BTupleType tupleType = (BTupleType) type; for (BType tupMemType : tupleType.tupleTypes) { if (!isInherentlyImmutableType(tupMemType) && !isSelectivelyImmutableType(tupMemType, unresolvedTypes, forceCheck)) { return false; } } BType tupRestType = tupleType.restType; if (tupRestType == null) { return true; } return isInherentlyImmutableType(tupRestType) || isSelectivelyImmutableType(tupRestType, unresolvedTypes, forceCheck); case TypeTags.RECORD: BRecordType recordType = (BRecordType) type; for (BField field : recordType.fields.values()) { BType fieldType = field.type; if 
(!isInherentlyImmutableType(fieldType) && !isSelectivelyImmutableType(fieldType, unresolvedTypes, forceCheck)) { return false; } } BType recordRestType = recordType.restFieldType; if (recordRestType == null || recordRestType == symTable.noType) { return true; } return isInherentlyImmutableType(recordRestType) || isSelectivelyImmutableType(recordRestType, unresolvedTypes, forceCheck); case TypeTags.MAP: BType constraintType = ((BMapType) type).constraint; return isInherentlyImmutableType(constraintType) || isSelectivelyImmutableType(constraintType, unresolvedTypes, forceCheck); case TypeTags.OBJECT: BObjectType objectType = (BObjectType) type; for (BField field : objectType.fields.values()) { BType fieldType = field.type; if (!isInherentlyImmutableType(fieldType) && !isSelectivelyImmutableType(fieldType, unresolvedTypes, forceCheck)) { return false; } } return true; case TypeTags.TABLE: BType tableConstraintType = ((BTableType) type).constraint; return isInherentlyImmutableType(tableConstraintType) || isSelectivelyImmutableType(tableConstraintType, unresolvedTypes, forceCheck); case TypeTags.UNION: boolean readonlyIntersectionExists = false; for (BType memberType : ((BUnionType) type).getMemberTypes()) { if (isInherentlyImmutableType(memberType) || isSelectivelyImmutableType(memberType, unresolvedTypes, forceCheck)) { readonlyIntersectionExists = true; } } return readonlyIntersectionExists; case TypeTags.INTERSECTION: return isSelectivelyImmutableType(((BIntersectionType) type).effectiveType, unresolvedTypes, forceCheck); case TypeTags.TYPEREFDESC: return isSelectivelyImmutableType(((BTypeReferenceType) type).referredType, unresolvedTypes, forceCheck); } return false; } private boolean containsTypeParams(BInvokableType type) { boolean hasParameterizedTypes = type.paramTypes.stream() .anyMatch(t -> { if (t.tag == TypeTags.FUNCTION_POINTER) { return containsTypeParams((BInvokableType) t); } return TypeParamAnalyzer.isTypeParam(t); }); if (hasParameterizedTypes) { return hasParameterizedTypes; } if (type.retType.tag == TypeTags.FUNCTION_POINTER) { return containsTypeParams((BInvokableType) type.retType); } return TypeParamAnalyzer.isTypeParam(type.retType); } private boolean isSameFunctionType(BInvokableType source, BInvokableType target, Set<TypePair> unresolvedTypes) { return checkFunctionTypeEquality(source, target, unresolvedTypes, this::isSameType); } private boolean checkFunctionTypeEquality(BInvokableType source, BInvokableType target, Set<TypePair> unresolvedTypes, TypeEqualityPredicate equality) { if (hasIncompatibleIsolatedFlags(source, target) || hasIncompatibleTransactionalFlags(source, target)) { return false; } if (Symbols.isFlagOn(target.flags, Flags.ANY_FUNCTION) && Symbols.isFlagOn(source.flags, Flags.ANY_FUNCTION)) { return true; } if (Symbols.isFlagOn(target.flags, Flags.ANY_FUNCTION) || Symbols.isFlagOn(source.flags, Flags.ANY_FUNCTION)) { return false; } if (source.paramTypes.size() != target.paramTypes.size()) { return false; } for (int i = 0; i < source.paramTypes.size(); i++) { if (!equality.test(source.paramTypes.get(i), target.paramTypes.get(i), unresolvedTypes)) { return false; } } if ((source.restType != null && target.restType == null) || target.restType != null && source.restType == null) { return false; } else if (source.restType != null && !equality.test(source.restType, target.restType, unresolvedTypes)) { return false; } if (source.retType == null && target.retType == null) { return true; } else if (source.retType == null || target.retType == null) { return 
false; } return isAssignable(source.retType, target.retType, unresolvedTypes); } private boolean hasIncompatibleIsolatedFlags(BInvokableType source, BInvokableType target) { return Symbols.isFlagOn(target.flags, Flags.ISOLATED) && !Symbols.isFlagOn(source.flags, Flags.ISOLATED); } private boolean hasIncompatibleTransactionalFlags(BInvokableType source, BInvokableType target) { return Symbols.isFlagOn(source.flags, Flags.TRANSACTIONAL) && !Symbols.isFlagOn(target.flags, Flags.TRANSACTIONAL); } public boolean isSameArrayType(BType source, BType target, Set<TypePair> unresolvedTypes) { if (target.tag != TypeTags.ARRAY || source.tag != TypeTags.ARRAY) { return false; } BArrayType lhsArrayType = (BArrayType) target; BArrayType rhsArrayType = (BArrayType) source; boolean hasSameTypeElements = isSameType(lhsArrayType.eType, rhsArrayType.eType, unresolvedTypes); if (lhsArrayType.state == BArrayState.OPEN) { return (rhsArrayType.state == BArrayState.OPEN) && hasSameTypeElements; } return checkSealedArraySizeEquality(rhsArrayType, lhsArrayType) && hasSameTypeElements; } public boolean isSameStreamType(BType source, BType target, Set<TypePair> unresolvedTypes) { if (target.tag != TypeTags.STREAM || source.tag != TypeTags.STREAM) { return false; } BStreamType lhsStreamType = (BStreamType) target; BStreamType rhsStreamType = (BStreamType) source; return isSameType(lhsStreamType.constraint, rhsStreamType.constraint, unresolvedTypes) && isSameType(lhsStreamType.completionType, rhsStreamType.completionType, unresolvedTypes); } public boolean checkSealedArraySizeEquality(BArrayType rhsArrayType, BArrayType lhsArrayType) { return lhsArrayType.size == rhsArrayType.size; } public boolean checkStructEquivalency(BType rhsType, BType lhsType) { return checkStructEquivalency(rhsType, lhsType, new HashSet<>()); } private boolean checkStructEquivalency(BType rhsType, BType lhsType, Set<TypePair> unresolvedTypes) { TypePair pair = new TypePair(rhsType, lhsType); if (unresolvedTypes.contains(pair)) { return true; } unresolvedTypes.add(pair); if (rhsType.tag == TypeTags.OBJECT && lhsType.tag == TypeTags.OBJECT) { return checkObjectEquivalency((BObjectType) rhsType, (BObjectType) lhsType, unresolvedTypes); } if (rhsType.tag == TypeTags.RECORD && lhsType.tag == TypeTags.RECORD) { return checkRecordEquivalency((BRecordType) rhsType, (BRecordType) lhsType, unresolvedTypes); } return false; } public boolean checkObjectEquivalency(BObjectType rhsType, BObjectType lhsType, Set<TypePair> unresolvedTypes) { if (Symbols.isFlagOn(lhsType.flags, Flags.ISOLATED) && !Symbols.isFlagOn(rhsType.flags, Flags.ISOLATED)) { return false; } BObjectTypeSymbol lhsStructSymbol = (BObjectTypeSymbol) lhsType.tsymbol; BObjectTypeSymbol rhsStructSymbol = (BObjectTypeSymbol) rhsType.tsymbol; List<BAttachedFunction> lhsFuncs = lhsStructSymbol.attachedFuncs; List<BAttachedFunction> rhsFuncs = ((BObjectTypeSymbol) rhsType.tsymbol).attachedFuncs; int lhsAttachedFuncCount = getObjectFuncCount(lhsStructSymbol); int rhsAttachedFuncCount = getObjectFuncCount(rhsStructSymbol); boolean isLhsAService = Symbols.isService(lhsStructSymbol); if (isLhsAService && !Symbols.isService(rhsStructSymbol)) { return false; } if (lhsType.fields.size() > rhsType.fields.size() || lhsAttachedFuncCount > rhsAttachedFuncCount) { return false; } for (BField bField : lhsType.fields.values()) { if (Symbols.isPrivate(bField.symbol)) { return false; } } for (BAttachedFunction func : lhsFuncs) { if (Symbols.isPrivate(func.symbol)) { return false; } } for (BField lhsField : 
lhsType.fields.values()) { BField rhsField = rhsType.fields.get(lhsField.name.value); if (rhsField == null || !isInSameVisibilityRegion(lhsField.symbol, rhsField.symbol) || !isAssignable(rhsField.type, lhsField.type, unresolvedTypes)) { return false; } } for (BAttachedFunction lhsFunc : lhsFuncs) { if (lhsFunc == lhsStructSymbol.initializerFunc) { continue; } if (isLhsAService && Symbols.isResource(lhsFunc.symbol)) { continue; } BAttachedFunction rhsFunc = getMatchingInvokableType(rhsFuncs, lhsFunc, unresolvedTypes); if (rhsFunc == null || !isInSameVisibilityRegion(lhsFunc.symbol, rhsFunc.symbol)) { return false; } if (Symbols.isRemote(lhsFunc.symbol) != Symbols.isRemote(rhsFunc.symbol)) { return false; } } return lhsType.typeIdSet.isAssignableFrom(rhsType.typeIdSet); } private int getObjectFuncCount(BObjectTypeSymbol sym) { int count = 0; for (BAttachedFunction attachedFunc : sym.attachedFuncs) { if (!Symbols.isResource(attachedFunc.symbol)) { count++; } } if (sym.initializerFunc != null && sym.attachedFuncs.contains(sym.initializerFunc)) { return count - 1; } return count; } public boolean checkRecordEquivalency(BRecordType rhsType, BRecordType lhsType, Set<TypePair> unresolvedTypes) { if (lhsType.sealed && !rhsType.sealed) { return false; } if (!rhsType.sealed && !isAssignable(rhsType.restFieldType, lhsType.restFieldType, unresolvedTypes)) { return false; } return checkFieldEquivalency(lhsType, rhsType, unresolvedTypes); } public void setForeachTypedBindingPatternType(BLangForeach foreachNode) { BType collectionType = getReferredType(foreachNode.collection.getBType()); BType varType; switch (collectionType.tag) { case TypeTags.STRING: varType = symTable.charStringType; break; case TypeTags.ARRAY: BArrayType arrayType = (BArrayType) collectionType; varType = arrayType.eType; break; case TypeTags.TUPLE: BTupleType tupleType = (BTupleType) collectionType; LinkedHashSet<BType> tupleTypes = new LinkedHashSet<>(tupleType.tupleTypes); if (tupleType.restType != null) { tupleTypes.add(tupleType.restType); } varType = tupleTypes.size() == 1 ? 
tupleTypes.iterator().next() : BUnionType.create(null, tupleTypes); break; case TypeTags.MAP: BMapType bMapType = (BMapType) collectionType; varType = bMapType.constraint; break; case TypeTags.RECORD: BRecordType recordType = (BRecordType) collectionType; varType = inferRecordFieldType(recordType); break; case TypeTags.XML: BType constraint = getReferredType(((BXMLType) collectionType).constraint); while (constraint.tag == TypeTags.XML) { collectionType = constraint; constraint = ((BXMLType) collectionType).constraint; } switch (constraint.tag) { case TypeTags.XML_ELEMENT: varType = symTable.xmlElementType; break; case TypeTags.XML_COMMENT: varType = symTable.xmlCommentType; break; case TypeTags.XML_TEXT: varType = symTable.xmlTextType; break; case TypeTags.XML_PI: varType = symTable.xmlPIType; break; case TypeTags.NEVER: varType = symTable.neverType; break; default: Set<BType> collectionTypes = getEffectiveMemberTypes((BUnionType) constraint); Set<BType> builtinXMLConstraintTypes = getEffectiveMemberTypes ((BUnionType) ((BXMLType) symTable.xmlType).constraint); if (collectionTypes.size() == 4 && builtinXMLConstraintTypes.equals(collectionTypes)) { varType = symTable.xmlType; } else { LinkedHashSet<BType> collectionTypesInSymTable = new LinkedHashSet<>(); for (BType subType : collectionTypes) { switch (subType.tag) { case TypeTags.XML_ELEMENT: collectionTypesInSymTable.add(symTable.xmlElementType); break; case TypeTags.XML_COMMENT: collectionTypesInSymTable.add(symTable.xmlCommentType); break; case TypeTags.XML_TEXT: collectionTypesInSymTable.add(symTable.xmlTextType); break; case TypeTags.XML_PI: collectionTypesInSymTable.add(symTable.xmlPIType); break; } } varType = BUnionType.create(null, collectionTypesInSymTable); } } break; case TypeTags.XML_TEXT: varType = symTable.xmlTextType; break; case TypeTags.TABLE: BTableType tableType = (BTableType) collectionType; varType = tableType.constraint; break; case TypeTags.STREAM: BStreamType streamType = (BStreamType) collectionType; if (streamType.constraint.tag == TypeTags.NONE) { varType = symTable.anydataType; break; } varType = streamType.constraint; List<BType> completionType = getAllTypes(streamType.completionType, true); if (completionType.stream().anyMatch(type -> type.tag != TypeTags.NIL)) { BType actualType = BUnionType.create(null, varType, streamType.completionType); dlog.error(foreachNode.collection.pos, DiagnosticErrorCode.INCOMPATIBLE_TYPES, varType, actualType); } break; case TypeTags.OBJECT: BUnionType nextMethodReturnType = getVarTypeFromIterableObject((BObjectType) collectionType); if (nextMethodReturnType != null) { foreachNode.resultType = getRecordType(nextMethodReturnType); BType valueType = (foreachNode.resultType != null) ? 
((BRecordType) foreachNode.resultType).fields.get("value").type : null; BType errorType = getErrorType(nextMethodReturnType); if (errorType != null) { BType actualType = BUnionType.create(null, valueType, errorType); dlog.error(foreachNode.collection.pos, DiagnosticErrorCode.INVALID_ITERABLE_COMPLETION_TYPE_IN_FOREACH_NEXT_FUNCTION, actualType, errorType); } foreachNode.nillableResultType = nextMethodReturnType; foreachNode.varType = valueType; return; } case TypeTags.SEMANTIC_ERROR: foreachNode.varType = symTable.semanticError; foreachNode.resultType = symTable.semanticError; foreachNode.nillableResultType = symTable.semanticError; return; default: foreachNode.varType = symTable.semanticError; foreachNode.resultType = symTable.semanticError; foreachNode.nillableResultType = symTable.semanticError; dlog.error(foreachNode.collection.pos, DiagnosticErrorCode.ITERABLE_NOT_SUPPORTED_COLLECTION, collectionType); return; } BInvokableSymbol iteratorSymbol = (BInvokableSymbol) symResolver.lookupLangLibMethod(collectionType, names.fromString(BLangCompilerConstants.ITERABLE_COLLECTION_ITERATOR_FUNC)); BObjectType objectType = (BObjectType) getReferredType(iteratorSymbol.retType); BUnionType nextMethodReturnType = (BUnionType) getResultTypeOfNextInvocation(objectType); foreachNode.varType = varType; foreachNode.resultType = getRecordType(nextMethodReturnType); foreachNode.nillableResultType = nextMethodReturnType; } public void setInputClauseTypedBindingPatternType(BLangInputClause bLangInputClause) { if (bLangInputClause.collection == null) { return; } BType collectionType = bLangInputClause.collection.getBType(); BType varType = visitCollectionType(bLangInputClause, collectionType); if (varType.tag == TypeTags.SEMANTIC_ERROR) { return; } BInvokableSymbol iteratorSymbol = (BInvokableSymbol) symResolver.lookupLangLibMethod(collectionType, names.fromString(BLangCompilerConstants.ITERABLE_COLLECTION_ITERATOR_FUNC)); BUnionType nextMethodReturnType = (BUnionType) getResultTypeOfNextInvocation((BObjectType) getReferredType(iteratorSymbol.retType)); bLangInputClause.varType = varType; bLangInputClause.resultType = getRecordType(nextMethodReturnType); bLangInputClause.nillableResultType = nextMethodReturnType; } private BType visitCollectionType(BLangInputClause bLangInputClause, BType collectionType) { switch (collectionType.tag) { case TypeTags.STRING: return symTable.stringType; case TypeTags.ARRAY: BArrayType arrayType = (BArrayType) collectionType; return arrayType.eType; case TypeTags.TUPLE: BTupleType tupleType = (BTupleType) collectionType; LinkedHashSet<BType> tupleTypes = new LinkedHashSet<>(tupleType.tupleTypes); if (tupleType.restType != null) { tupleTypes.add(tupleType.restType); } return tupleTypes.size() == 1 ? 
tupleTypes.iterator().next() : BUnionType.create(null, tupleTypes); case TypeTags.MAP: BMapType bMapType = (BMapType) collectionType; return bMapType.constraint; case TypeTags.RECORD: BRecordType recordType = (BRecordType) collectionType; return inferRecordFieldType(recordType); case TypeTags.XML: BXMLType xmlType = (BXMLType) collectionType; return xmlType.constraint; case TypeTags.XML_TEXT: return symTable.xmlTextType; case TypeTags.TABLE: BTableType tableType = (BTableType) collectionType; return tableType.constraint; case TypeTags.STREAM: BStreamType streamType = (BStreamType) collectionType; if (streamType.constraint.tag == TypeTags.NONE) { return symTable.anydataType; } return streamType.constraint; case TypeTags.OBJECT: if (!isAssignable(collectionType, symTable.iterableType)) { dlog.error(bLangInputClause.collection.pos, DiagnosticErrorCode.INVALID_ITERABLE_OBJECT_TYPE, bLangInputClause.collection.getBType(), symTable.iterableType); bLangInputClause.varType = symTable.semanticError; bLangInputClause.resultType = symTable.semanticError; bLangInputClause.nillableResultType = symTable.semanticError; break; } BUnionType nextMethodReturnType = getVarTypeFromIterableObject((BObjectType) collectionType); if (nextMethodReturnType != null) { bLangInputClause.resultType = getRecordType(nextMethodReturnType); bLangInputClause.nillableResultType = nextMethodReturnType; bLangInputClause.varType = ((BRecordType) bLangInputClause.resultType).fields.get("value").type; break; } case TypeTags.SEMANTIC_ERROR: bLangInputClause.varType = symTable.semanticError; bLangInputClause.resultType = symTable.semanticError; bLangInputClause.nillableResultType = symTable.semanticError; break; case TypeTags.TYPEREFDESC: return visitCollectionType(bLangInputClause, getReferredType(collectionType)); default: bLangInputClause.varType = symTable.semanticError; bLangInputClause.resultType = symTable.semanticError; bLangInputClause.nillableResultType = symTable.semanticError; dlog.error(bLangInputClause.collection.pos, DiagnosticErrorCode.ITERABLE_NOT_SUPPORTED_COLLECTION, collectionType); } return symTable.semanticError; } public BUnionType getVarTypeFromIterableObject(BObjectType collectionType) { BObjectTypeSymbol objectTypeSymbol = (BObjectTypeSymbol) collectionType.tsymbol; for (BAttachedFunction func : objectTypeSymbol.attachedFuncs) { if (func.funcName.value.equals(BLangCompilerConstants.ITERABLE_COLLECTION_ITERATOR_FUNC)) { return getVarTypeFromIteratorFunc(func); } } return null; } private BUnionType getVarTypeFromIteratorFunc(BAttachedFunction candidateIteratorFunc) { if (!candidateIteratorFunc.type.paramTypes.isEmpty()) { return null; } BType returnType = candidateIteratorFunc.type.retType; return getVarTypeFromIteratorFuncReturnType(returnType); } public BUnionType getVarTypeFromIteratorFuncReturnType(BType type) { BObjectTypeSymbol objectTypeSymbol; BType returnType = getReferredType(type); if (returnType.tag != TypeTags.OBJECT) { return null; } objectTypeSymbol = (BObjectTypeSymbol) returnType.tsymbol; for (BAttachedFunction func : objectTypeSymbol.attachedFuncs) { if (func.funcName.value.equals(BLangCompilerConstants.NEXT_FUNC)) { return getVarTypeFromNextFunc(func); } } return null; } private BUnionType getVarTypeFromNextFunc(BAttachedFunction nextFunc) { BType returnType; if (!nextFunc.type.paramTypes.isEmpty()) { return null; } returnType = nextFunc.type.retType; if (checkNextFuncReturnType(returnType)) { return (BUnionType) returnType; } return null; } private boolean checkNextFuncReturnType(BType 
returnType) { if (returnType.tag != TypeTags.UNION) { return false; } List<BType> types = getAllTypes(returnType, true); boolean containsCompletionType = types.removeIf(type -> type.tag == TypeTags.NIL); containsCompletionType = types.removeIf(type -> type.tag == TypeTags.ERROR) || containsCompletionType; if (!containsCompletionType) { return false; } if (types.size() != 1) { return false; } if (types.get(0).tag != TypeTags.RECORD) { return false; } BRecordType recordType = (BRecordType) types.get(0); return checkRecordTypeInNextFuncReturnType(recordType); } private boolean checkRecordTypeInNextFuncReturnType(BRecordType recordType) { if (!recordType.sealed) { return false; } if (recordType.fields.size() != 1) { return false; } return recordType.fields.containsKey(BLangCompilerConstants.VALUE_FIELD); } private BRecordType getRecordType(BUnionType type) { for (BType member : type.getMemberTypes()) { if (getReferredType(member).tag == TypeTags.RECORD) { return (BRecordType) getReferredType(member); } } return null; } public BErrorType getErrorType(BUnionType type) { for (BType member : type.getMemberTypes()) { member = getReferredType(member); member = getEffectiveTypeForIntersection(member); if (member.tag == TypeTags.ERROR) { return (BErrorType) member; } else if (member.tag == TypeTags.UNION) { BErrorType e = getErrorType((BUnionType) member); if (e != null) { return e; } } } return null; } public BType getResultTypeOfNextInvocation(BObjectType iteratorType) { BAttachedFunction nextFunc = getAttachedFuncFromObject(iteratorType, BLangCompilerConstants.NEXT_FUNC); return Objects.requireNonNull(nextFunc).type.retType; } public BAttachedFunction getAttachedFuncFromObject(BObjectType objectType, String funcName) { BObjectTypeSymbol iteratorSymbol = (BObjectTypeSymbol) objectType.tsymbol; for (BAttachedFunction bAttachedFunction : iteratorSymbol.attachedFuncs) { if (funcName.equals(bAttachedFunction.funcName.value)) { return bAttachedFunction; } } return null; } public BType inferRecordFieldType(BRecordType recordType) { Map<String, BField> fields = recordType.fields; BUnionType unionType = BUnionType.create(null); if (!recordType.sealed) { unionType.add(recordType.restFieldType); } else if (fields.size() == 0) { unionType.add(symTable.neverType); } for (BField field : fields.values()) { if (isAssignable(field.type, unionType)) { continue; } if (isAssignable(unionType, field.type)) { unionType = BUnionType.create(null); } unionType.add(field.type); } if (unionType.getMemberTypes().size() > 1) { unionType.tsymbol = Symbols.createTypeSymbol(SymTag.UNION_TYPE, Flags.asMask(EnumSet.of(Flag.PUBLIC)), Names.EMPTY, recordType.tsymbol.pkgID, null, recordType.tsymbol.owner, symTable.builtinPos, VIRTUAL); return unionType; } return unionType.getMemberTypes().iterator().next(); } public BType getTypeWithEffectiveIntersectionTypes(BType type) { type = getReferredType(type); if (type.tag == TypeTags.INTERSECTION) { type = ((BIntersectionType) type).effectiveType; } if (type.tag != TypeTags.UNION) { return type; } LinkedHashSet<BType> members = new LinkedHashSet<>(); boolean hasDifferentMember = false; for (BType memberType : ((BUnionType) type).getMemberTypes()) { BType effectiveType = getTypeWithEffectiveIntersectionTypes(memberType); if (effectiveType != memberType) { hasDifferentMember = true; } members.add(effectiveType); } if (hasDifferentMember) { return BUnionType.create(null, members); } return type; } /** * Enum to represent type test result. 
* * @since 1.2.0 */ enum TypeTestResult { NOT_FOUND, TRUE, FALSE } TypeTestResult isBuiltInTypeWidenPossible(BType actualType, BType targetType) { int targetTag = getReferredType(targetType).tag; int actualTag = getReferredType(actualType).tag; if (actualTag < TypeTags.JSON && targetTag < TypeTags.JSON) { switch (actualTag) { case TypeTags.INT: case TypeTags.BYTE: case TypeTags.FLOAT: case TypeTags.DECIMAL: if (targetTag == TypeTags.BOOLEAN || targetTag == TypeTags.STRING) { return TypeTestResult.FALSE; } break; case TypeTags.BOOLEAN: if (targetTag == TypeTags.INT || targetTag == TypeTags.BYTE || targetTag == TypeTags.FLOAT || targetTag == TypeTags.DECIMAL || targetTag == TypeTags.STRING) { return TypeTestResult.FALSE; } break; case TypeTags.STRING: if (targetTag == TypeTags.INT || targetTag == TypeTags.BYTE || targetTag == TypeTags.FLOAT || targetTag == TypeTags.DECIMAL || targetTag == TypeTags.BOOLEAN) { return TypeTestResult.FALSE; } break; } } switch (actualTag) { case TypeTags.INT: case TypeTags.BYTE: case TypeTags.FLOAT: case TypeTags.DECIMAL: case TypeTags.BOOLEAN: case TypeTags.STRING: case TypeTags.SIGNED32_INT: case TypeTags.SIGNED16_INT: case TypeTags.SIGNED8_INT: case TypeTags.UNSIGNED32_INT: case TypeTags.UNSIGNED16_INT: case TypeTags.UNSIGNED8_INT: case TypeTags.CHAR_STRING: if (targetTag == TypeTags.JSON || targetTag == TypeTags.ANYDATA || targetTag == TypeTags.ANY || targetTag == TypeTags.READONLY) { return TypeTestResult.TRUE; } break; case TypeTags.ANYDATA: case TypeTags.TYPEDESC: if (targetTag == TypeTags.ANY) { return TypeTestResult.TRUE; } break; default: } if (TypeTags.isIntegerTypeTag(targetTag) && actualTag == targetTag) { return TypeTestResult.FALSE; } if ((TypeTags.isIntegerTypeTag(actualTag) || actualTag == TypeTags.BYTE) && (TypeTags.isIntegerTypeTag(targetTag) || targetTag == TypeTags.BYTE)) { return checkBuiltInIntSubtypeWidenPossible(actualType, targetType); } if (actualTag == TypeTags.CHAR_STRING && TypeTags.STRING == targetTag) { return TypeTestResult.TRUE; } return TypeTestResult.NOT_FOUND; } public boolean isImplicitlyCastable(BType actual, BType target) { /* The word Builtin refers to compiler-known types.
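For example, byte implicitly widens to json, anydata, any and readonly, so the compiler inserts the conversion without an explicit cast expression.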
*/ BType targetType = getReferredType(target); BType actualType = getReferredType(actual); BType newTargetType = targetType; int targetTypeTag = targetType.tag; if ((targetTypeTag == TypeTags.UNION || targetTypeTag == TypeTags.FINITE) && isValueType(actualType)) { newTargetType = symTable.anyType; } else if (targetTypeTag == TypeTags.INTERSECTION) { newTargetType = ((BIntersectionType) targetType).effectiveType; } TypeTestResult result = isBuiltInTypeWidenPossible(actualType, newTargetType); if (result != TypeTestResult.NOT_FOUND) { return result == TypeTestResult.TRUE; } if (isValueType(targetType) && (actualType.tag == TypeTags.FINITE || (actualType.tag == TypeTags.UNION && ((BUnionType) actualType).getMemberTypes().stream() .anyMatch(type -> type.tag == TypeTags.FINITE && isAssignable(type, targetType))))) { return TypeTags.isIntegerTypeTag(targetTypeTag) || targetType.tag == TypeTags.BYTE || targetTypeTag == TypeTags.FLOAT || targetTypeTag == TypeTags.DECIMAL || TypeTags.isStringTypeTag(targetTypeTag) || targetTypeTag == TypeTags.BOOLEAN; } else if (isValueType(targetType) && actualType.tag == TypeTags.UNION && ((BUnionType) actualType).getMemberTypes().stream().allMatch(type -> isAssignable(type, targetType))) { return true; } else if (targetTypeTag == TypeTags.ERROR && (actualType.tag == TypeTags.UNION && isAllErrorMembers((BUnionType) actualType))) { return true; } return false; } public boolean isTypeCastable(BLangExpression expr, BType source, BType target, SymbolEnv env) { BType sourceType = getReferredType(source); BType targetType = getReferredType(target); if (sourceType.tag == TypeTags.SEMANTIC_ERROR || targetType.tag == TypeTags.SEMANTIC_ERROR || sourceType == targetType) { return true; } IntersectionContext intersectionContext = IntersectionContext.compilerInternalIntersectionTestContext(); BType errorIntersection = getTypeIntersection(intersectionContext, sourceType, symTable.errorType, env); if (errorIntersection != symTable.semanticError && getTypeIntersection(intersectionContext, symTable.errorType, targetType, env) == symTable.semanticError) { return false; } if (isAssignable(sourceType, targetType) || isAssignable(targetType, sourceType)) { return true; } if (isNumericConversionPossible(expr, sourceType, targetType)) { return true; } if (sourceType.tag == TypeTags.ANY && targetType.tag == TypeTags.READONLY) { return true; } boolean validTypeCast = false; if (sourceType instanceof BUnionType) { if (getTypeForUnionTypeMembersAssignableToType((BUnionType) sourceType, targetType, env, intersectionContext, new LinkedHashSet<>()) != symTable.semanticError) { validTypeCast = true; } } if (targetType instanceof BUnionType) { if (getTypeForUnionTypeMembersAssignableToType((BUnionType) targetType, sourceType, env, intersectionContext, new LinkedHashSet<>()) != symTable.semanticError) { validTypeCast = true; } } if (sourceType.tag == TypeTags.FINITE) { if (getTypeForFiniteTypeValuesAssignableToType((BFiniteType) sourceType, targetType) != symTable.semanticError) { validTypeCast = true; } } if (targetType.tag == TypeTags.FINITE) { if (getTypeForFiniteTypeValuesAssignableToType((BFiniteType) targetType, sourceType) != symTable.semanticError) { validTypeCast = true; } } if (validTypeCast) { if (isValueType(sourceType)) { setImplicitCastExpr(expr, sourceType, symTable.anyType); } return true; } return false; } boolean isNumericConversionPossible(BLangExpression expr, BType sourceType, BType targetType) { final boolean isSourceNumericType = isBasicNumericType(sourceType); final 
boolean isTargetNumericType = isBasicNumericType(targetType); if (isSourceNumericType && isTargetNumericType) { return true; } if (targetType.tag == TypeTags.UNION) { HashSet<Integer> typeTags = new HashSet<>(); for (BType bType : ((BUnionType) targetType).getMemberTypes()) { if (isBasicNumericType(bType)) { typeTags.add(bType.tag); if (typeTags.size() > 1) { return false; } } } } if (!isTargetNumericType && targetType.tag != TypeTags.UNION) { return false; } if (isSourceNumericType) { setImplicitCastExpr(expr, sourceType, symTable.anyType); return true; } switch (sourceType.tag) { case TypeTags.ANY: case TypeTags.ANYDATA: case TypeTags.JSON: return true; case TypeTags.UNION: for (BType memType : ((BUnionType) sourceType).getMemberTypes()) { BType referredType = getReferredType(memType); if (isBasicNumericType(referredType) || (referredType.tag == TypeTags.FINITE && finiteTypeContainsNumericTypeValues((BFiniteType) referredType))) { return true; } } break; case TypeTags.FINITE: if (finiteTypeContainsNumericTypeValues((BFiniteType) sourceType)) { return true; } break; } return false; } private boolean isAllErrorMembers(BUnionType actualType) { return actualType.getMemberTypes().stream().allMatch(t -> isAssignable(t, symTable.errorType)); } public void setImplicitCastExpr(BLangExpression expr, BType actualType, BType targetType) { BType expType = getReferredType(targetType); if (!isImplicitlyCastable(actualType, expType)) { return; } BLangTypeConversionExpr implicitConversionExpr = (BLangTypeConversionExpr) TreeBuilder.createTypeConversionNode(); implicitConversionExpr.pos = expr.pos; implicitConversionExpr.expr = expr.impConversionExpr == null ? expr : expr.impConversionExpr; implicitConversionExpr.setBType(expType); implicitConversionExpr.targetType = expType; implicitConversionExpr.internal = true; expr.impConversionExpr = implicitConversionExpr; } public BType getElementType(BType type) { if (type.tag != TypeTags.ARRAY) { return type; } return getElementType(((BArrayType) type).getElementType()); } public boolean checkListenerCompatibilityAtServiceDecl(BType type) { if (type.tag == TypeTags.UNION) { int listenerCompatibleTypeCount = 0; for (BType memberType : ((BUnionType) type).getMemberTypes()) { if (memberType.tag != TypeTags.ERROR) { if (!checkListenerCompatibility(memberType)) { return false; } listenerCompatibleTypeCount++; } } return listenerCompatibleTypeCount > 0; } return checkListenerCompatibility(type); } public boolean checkListenerCompatibility(BType bType) { BType type = getReferredType(bType); if (type.tag == TypeTags.UNION) { BUnionType unionType = (BUnionType) type; for (BType memberType : unionType.getMemberTypes()) { if (!checkListenerCompatibility(memberType)) { return false; } } return true; } if (type.tag != TypeTags.OBJECT) { return false; } BObjectType rhsType = (BObjectType) type; List<BAttachedFunction> rhsFuncs = ((BStructureTypeSymbol) rhsType.tsymbol).attachedFuncs; ListenerValidationModel listenerValidationModel = new ListenerValidationModel(this, symTable); return listenerValidationModel.checkMethods(rhsFuncs); } public boolean isValidErrorDetailType(BType detailType) { switch (detailType.tag) { case TypeTags.TYPEREFDESC: return isValidErrorDetailType(((BTypeReferenceType) detailType).referredType); case TypeTags.MAP: case TypeTags.RECORD: return isAssignable(detailType, symTable.detailType); } return false; } private boolean isSealedRecord(BType recordType) { return recordType.getKind() == TypeKind.RECORD && ((BRecordType) recordType).sealed; } private 
boolean isNullable(BType fieldType) { return fieldType.isNullable(); } private class BSameTypeVisitor implements BTypeVisitor<BType, Boolean> { Set<TypePair> unresolvedTypes; BSameTypeVisitor(Set<TypePair> unresolvedTypes) { this.unresolvedTypes = unresolvedTypes; } @Override public Boolean visit(BType target, BType source) { BType t = getReferredType(target); BType s = getReferredType(source); if (t == s) { return true; } switch (t.tag) { case TypeTags.INT: case TypeTags.BYTE: case TypeTags.FLOAT: case TypeTags.DECIMAL: case TypeTags.STRING: case TypeTags.BOOLEAN: return t.tag == s.tag && ((TypeParamAnalyzer.isTypeParam(t) || TypeParamAnalyzer.isTypeParam(s)) || (t instanceof BTypeReferenceType || s instanceof BTypeReferenceType)); case TypeTags.ANY: case TypeTags.ANYDATA: return t.tag == s.tag && hasSameReadonlyFlag(s, t) && (TypeParamAnalyzer.isTypeParam(t) || TypeParamAnalyzer.isTypeParam(s)); default: break; } return false; } @Override public Boolean visit(BBuiltInRefType t, BType s) { return t == s; } @Override public Boolean visit(BAnyType t, BType s) { return t == s; } @Override public Boolean visit(BAnydataType t, BType s) { if (t == s) { return true; } return t.tag == s.tag; } @Override public Boolean visit(BMapType t, BType s) { if (s.tag != TypeTags.MAP || !hasSameReadonlyFlag(s, t)) { return false; } BMapType sType = ((BMapType) s); return isSameType(sType.constraint, t.constraint, this.unresolvedTypes); } @Override public Boolean visit(BFutureType t, BType s) { return s.tag == TypeTags.FUTURE && isSameType(t.constraint, ((BFutureType) s).constraint, this.unresolvedTypes); } @Override public Boolean visit(BXMLType t, BType s) { return visit((BBuiltInRefType) t, s); } @Override public Boolean visit(BJSONType t, BType s) { return s.tag == TypeTags.JSON && hasSameReadonlyFlag(s, t); } @Override public Boolean visit(BArrayType t, BType s) { return s.tag == TypeTags.ARRAY && hasSameReadonlyFlag(s, t) && isSameArrayType(s, t, this.unresolvedTypes); } @Override public Boolean visit(BObjectType t, BType s) { if (t == s) { return true; } if (s.tag != TypeTags.OBJECT) { return false; } return t.tsymbol.pkgID.equals(s.tsymbol.pkgID) && t.tsymbol.name.equals(s.tsymbol.name); } @Override public Boolean visit(BRecordType t, BType s) { if (t == s) { return true; } if (s.tag != TypeTags.RECORD || !hasSameReadonlyFlag(s, t)) { return false; } BRecordType source = (BRecordType) s; if (source.fields.size() != t.fields.size()) { return false; } for (BField sourceField : source.fields.values()) { if (t.fields.containsKey(sourceField.name.value)) { BField targetField = t.fields.get(sourceField.name.value); if (isSameType(sourceField.type, targetField.type, this.unresolvedTypes) && hasSameOptionalFlag(sourceField.symbol, targetField.symbol) && (!Symbols.isFlagOn(targetField.symbol.flags, Flags.READONLY) || Symbols.isFlagOn(sourceField.symbol.flags, Flags.READONLY))) { continue; } } return false; } return isSameType(source.restFieldType, t.restFieldType, this.unresolvedTypes); } private boolean hasSameOptionalFlag(BVarSymbol s, BVarSymbol t) { return ((s.flags & Flags.OPTIONAL) ^ (t.flags & Flags.OPTIONAL)) != Flags.OPTIONAL; } private boolean hasSameReadonlyFlag(BType source, BType target) { return Symbols.isFlagOn(target.flags, Flags.READONLY) == Symbols.isFlagOn(source.flags, Flags.READONLY); } public Boolean visit(BTupleType t, BType s) { if (((!t.tupleTypes.isEmpty() && checkAllTupleMembersBelongNoType(t.tupleTypes)) || (t.restType != null && t.restType.tag == TypeTags.NONE)) && !(s.tag == 
TypeTags.ARRAY && ((BArrayType) s).state == BArrayState.OPEN)) { return true; } if (s.tag != TypeTags.TUPLE || !hasSameReadonlyFlag(s, t)) { return false; } BTupleType source = (BTupleType) s; if (source.tupleTypes.size() != t.tupleTypes.size()) { return false; } BType sourceRestType = source.restType; BType targetRestType = t.restType; if ((sourceRestType == null || targetRestType == null) && sourceRestType != targetRestType) { return false; } for (int i = 0; i < source.tupleTypes.size(); i++) { if (t.getTupleTypes().get(i) == symTable.noType) { continue; } if (!isSameType(source.getTupleTypes().get(i), t.tupleTypes.get(i), this.unresolvedTypes)) { return false; } } if (sourceRestType == null || targetRestType == symTable.noType) { return true; } return isSameType(sourceRestType, targetRestType, this.unresolvedTypes); } @Override public Boolean visit(BStreamType t, BType s) { return s.tag == TypeTags.STREAM && isSameStreamType(s, t, this.unresolvedTypes); } @Override public Boolean visit(BTableType t, BType s) { return t == s; } @Override public Boolean visit(BInvokableType t, BType s) { return s.tag == TypeTags.INVOKABLE && isSameFunctionType((BInvokableType) s, t, this.unresolvedTypes); } @Override public Boolean visit(BUnionType tUnionType, BType s) { if (s.tag != TypeTags.UNION || !hasSameReadonlyFlag(s, tUnionType)) { return false; } BUnionType sUnionType = (BUnionType) s; if (sUnionType.getMemberTypes().size() != tUnionType.getMemberTypes().size()) { return false; } Set<BType> sourceTypes = new LinkedHashSet<>(sUnionType.getMemberTypes().size()); Set<BType> targetTypes = new LinkedHashSet<>(tUnionType.getMemberTypes().size()); sourceTypes.add(sUnionType); sourceTypes.addAll(sUnionType.getMemberTypes()); targetTypes.add(tUnionType); targetTypes.addAll(tUnionType.getMemberTypes()); boolean notSameType = sourceTypes .stream() .map(sT -> targetTypes .stream() .anyMatch(it -> isSameType(it, sT, this.unresolvedTypes))) .anyMatch(foundSameType -> !foundSameType); return !notSameType; } @Override public Boolean visit(BIntersectionType tIntersectionType, BType s) { if (s.tag != TypeTags.INTERSECTION || !hasSameReadonlyFlag(s, tIntersectionType)) { return false; } BIntersectionType sIntersectionType = (BIntersectionType) s; if (sIntersectionType.getConstituentTypes().size() != tIntersectionType.getConstituentTypes().size()) { return false; } Set<BType> sourceTypes = new LinkedHashSet<>(sIntersectionType.getConstituentTypes()); Set<BType> targetTypes = new LinkedHashSet<>(tIntersectionType.getConstituentTypes()); for (BType sourceType : sourceTypes) { boolean foundSameType = false; for (BType targetType : targetTypes) { if (isSameType(sourceType, targetType, this.unresolvedTypes)) { foundSameType = true; break; } } if (!foundSameType) { return false; } } return true; } @Override public Boolean visit(BErrorType t, BType s) { if (s.tag != TypeTags.ERROR) { return false; } BErrorType source = (BErrorType) s; if (!source.typeIdSet.equals(t.typeIdSet)) { return false; } if (source.detailType == t.detailType) { return true; } return isSameType(source.detailType, t.detailType, this.unresolvedTypes); } @Override public Boolean visit(BTypedescType t, BType s) { if (s.tag != TypeTags.TYPEDESC) { return false; } BTypedescType sType = ((BTypedescType) s); return isSameType(sType.constraint, t.constraint, this.unresolvedTypes); } @Override public Boolean visit(BFiniteType t, BType s) { return s == t; } @Override public Boolean visit(BParameterizedType t, BType s) { if (s.tag != TypeTags.PARAMETERIZED_TYPE) 
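/* two parameterized types are the same only when their value types match and they stem from the same parameter symbol */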
{ return false; } BParameterizedType sType = (BParameterizedType) s; return isSameType(sType.paramValueType, t.paramValueType) && sType.paramSymbol.equals(t.paramSymbol); } }; private class BOrderedTypeVisitor implements BTypeVisitor<BType, Boolean> { Set<TypePair> unresolvedTypes; BOrderedTypeVisitor(Set<TypePair> unresolvedTypes) { this.unresolvedTypes = unresolvedTypes; } @Override public Boolean visit(BType target, BType source) { int sourceTag = getReferredType(source).tag; int targetTag = getReferredType(target).tag; if (sourceTag == TypeTags.INTERSECTION || targetTag == TypeTags.INTERSECTION) { sourceTag = getEffectiveTypeForIntersection(getReferredType(source)).tag; targetTag = getEffectiveTypeForIntersection(getReferredType(target)).tag; } if (isSimpleBasicType(sourceTag) && isSimpleBasicType(targetTag)) { return (source == target) || isIntOrStringType(sourceTag, targetTag); } if (sourceTag == TypeTags.FINITE) { return checkValueSpaceHasSameType(((BFiniteType) getReferredType(source)), getReferredType(target)); } return isSameOrderedType(getReferredType(target), getReferredType(source), this.unresolvedTypes); } @Override public Boolean visit(BArrayType target, BType source) { if (source.tag != TypeTags.ARRAY) { return false; } BArrayType rhsArrayType = (BArrayType) source; boolean hasSameOrderedTypeElements = isSameOrderedType(target.eType, rhsArrayType.eType, unresolvedTypes); if (target.state == BArrayState.OPEN) { return (rhsArrayType.state == BArrayState.OPEN) && hasSameOrderedTypeElements; } return hasSameOrderedTypeElements; } @Override public Boolean visit(BTupleType target, BType source) { if (source.tag != TypeTags.TUPLE || !hasSameReadonlyFlag(source, target)) { return false; } BTupleType sourceT = (BTupleType) source; BType sourceRestType = sourceT.restType; BType targetRestType = target.restType; int sourceTupleCount = sourceT.tupleTypes.size(); int targetTupleCount = target.tupleTypes.size(); int len = Math.min(sourceTupleCount, targetTupleCount); for (int i = 0; i < len; i++) { if (!isSameOrderedType(sourceT.getTupleTypes().get(i), target.tupleTypes.get(i), this.unresolvedTypes)) { return false; } } if (sourceTupleCount == targetTupleCount) { if (sourceRestType == null || targetRestType == null) { return true; } return isSameOrderedType(sourceRestType, targetRestType, this.unresolvedTypes); } if (sourceTupleCount > targetTupleCount) { return checkSameOrderedTypeInTuples(sourceT, sourceTupleCount, targetTupleCount, sourceRestType, targetRestType); } return checkSameOrderedTypeInTuples(target, targetTupleCount, sourceTupleCount, targetRestType, sourceRestType); } private boolean checkSameOrderedTypeInTuples(BTupleType source, int sourceTupleCount, int targetTupleCount, BType sourceRestType, BType targetRestType) { if (targetRestType == null) { return true; } for (int i = targetTupleCount; i < sourceTupleCount; i++) { if (!isSameOrderedType(source.getTupleTypes().get(i), targetRestType, this.unresolvedTypes)) { return false; } } if (sourceRestType == null) { return true; } return isSameOrderedType(sourceRestType, targetRestType, this.unresolvedTypes); } @Override public Boolean visit(BUnionType target, BType source) { if (source.tag != TypeTags.UNION || !hasSameReadonlyFlag(source, target)) { return checkUnionHasSameType(target.getMemberTypes(), source); } BUnionType sUnionType = (BUnionType) source; LinkedHashSet<BType> sourceTypes = sUnionType.getMemberTypes(); LinkedHashSet<BType> targetTypes = target.getMemberTypes(); if 
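/* unions consisting solely of finite (and nil) members are compared through the ordered type of their value spaces rather than member by member */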
(checkUnionHasAllFiniteOrNilMembers(sourceTypes) && checkUnionHasAllFiniteOrNilMembers(targetTypes)) { if (sourceTypes.contains(symTable.nilType) != targetTypes.contains(symTable.nilType)) { return false; } return checkValueSpaceHasSameType(((BFiniteType) target.getMemberTypes().iterator().next()), sUnionType.getMemberTypes().iterator().next()); } if (sUnionType.getMemberTypes().size() != target.getMemberTypes().size()) { return false; } return checkSameOrderedTypesInUnionMembers(sourceTypes, targetTypes); } private boolean checkSameOrderedTypesInUnionMembers(LinkedHashSet<BType> sourceTypes, LinkedHashSet<BType> targetTypes) { for (BType sourceT : sourceTypes) { boolean foundSameOrderedType = false; for (BType targetT : targetTypes) { if (isSameOrderedType(targetT, sourceT, this.unresolvedTypes)) { foundSameOrderedType = true; break; } } if (!foundSameOrderedType) { return false; } } return true; } @Override public Boolean visit(BFiniteType t, BType s) { return checkValueSpaceHasSameType(t, s); } private boolean hasSameReadonlyFlag(BType source, BType target) { return Symbols.isFlagOn(target.flags, Flags.READONLY) == Symbols.isFlagOn(source.flags, Flags.READONLY); } @Override public Boolean visit(BBuiltInRefType t, BType s) { return false; } @Override public Boolean visit(BAnyType t, BType s) { return false; } @Override public Boolean visit(BAnydataType t, BType s) { return false; } @Override public Boolean visit(BMapType t, BType s) { return false; } @Override public Boolean visit(BFutureType t, BType s) { return false; } @Override public Boolean visit(BXMLType t, BType s) { return false; } @Override public Boolean visit(BJSONType t, BType s) { return false; } @Override public Boolean visit(BObjectType t, BType s) { return false; } @Override public Boolean visit(BRecordType t, BType s) { return false; } @Override public Boolean visit(BStreamType t, BType s) { return false; } @Override public Boolean visit(BTableType t, BType s) { return false; } @Override public Boolean visit(BInvokableType t, BType s) { return false; } @Override public Boolean visit(BIntersectionType tIntersectionType, BType s) { return false; } @Override public Boolean visit(BErrorType t, BType s) { return false; } @Override public Boolean visit(BTypedescType t, BType s) { return false; } @Override public Boolean visit(BParameterizedType t, BType s) { return false; } }; private boolean checkUnionHasSameType(LinkedHashSet<BType> memberTypes, BType baseType) { boolean isSameType = false; for (BType type : memberTypes) { type = getReferredType(type); if (type.tag == TypeTags.FINITE) { for (BLangExpression expr : ((BFiniteType) type).getValueSpace()) { isSameType = isSameOrderedType(expr.getBType(), baseType); if (!isSameType) { return false; } } } else if (type.tag == TypeTags.UNION) { return checkUnionHasSameType((LinkedHashSet<BType>) ((UnionType) type).getMemberTypes(), baseType); } else if (isSimpleBasicType(type.tag)) { isSameType = isSameOrderedType(type, baseType); if (!isSameType) { return false; } } } return isSameType; } private boolean checkValueSpaceHasSameType(BFiniteType finiteType, BType type) { BType baseType = getReferredType(type); if (baseType.tag == TypeTags.FINITE) { BType baseExprType = finiteType.getValueSpace().iterator().next().getBType(); return checkValueSpaceHasSameType(((BFiniteType) baseType), baseExprType); } boolean isValueSpaceSameType = false; for (BLangExpression expr : finiteType.getValueSpace()) { isValueSpaceSameType = isSameOrderedType(expr.getBType(), baseType); if 
(!isValueSpaceSameType) { break; } } return isValueSpaceSameType; } private boolean checkUnionHasAllFiniteOrNilMembers(LinkedHashSet<BType> memberTypes) { for (BType type : memberTypes) { if (type.tag != TypeTags.FINITE && type.tag != TypeTags.NIL) { return false; } } return true; } private boolean checkFieldEquivalency(BRecordType lhsType, BRecordType rhsType, Set<TypePair> unresolvedTypes) { Map<String, BField> rhsFields = new LinkedHashMap<>(rhsType.fields); for (BField lhsField : lhsType.fields.values()) { BField rhsField = rhsFields.get(lhsField.name.value); if (rhsField == null) { if (!Symbols.isOptional(lhsField.symbol)) { return false; } continue; } if (hasIncompatibleReadOnlyFlags(lhsField.symbol.flags, rhsField.symbol.flags)) { return false; } if (!Symbols.isOptional(lhsField.symbol) && Symbols.isOptional(rhsField.symbol)) { return false; } if (!isAssignable(rhsField.type, lhsField.type, unresolvedTypes)) { return false; } rhsFields.remove(lhsField.name.value); } if (lhsType.sealed) { for (BField field : rhsFields.values()) { if (!isNeverTypeOrStructureTypeWithARequiredNeverMember(field.type)) { return false; } } return true; } BType lhsRestFieldType = lhsType.restFieldType; for (BField field : rhsFields.values()) { if (!isAssignable(field.type, lhsRestFieldType, unresolvedTypes)) { return false; } } return true; } private BAttachedFunction getMatchingInvokableType(List<BAttachedFunction> rhsFuncList, BAttachedFunction lhsFunc, Set<TypePair> unresolvedTypes) { return rhsFuncList.stream() .filter(rhsFunc -> lhsFunc.funcName.equals(rhsFunc.funcName)) .filter(rhsFunc -> isFunctionTypeAssignable(rhsFunc.type, lhsFunc.type, unresolvedTypes)) .findFirst() .orElse(null); } private boolean isInSameVisibilityRegion(BSymbol lhsSym, BSymbol rhsSym) { if (Symbols.isPrivate(lhsSym)) { return Symbols.isPrivate(rhsSym) && lhsSym.pkgID.equals(rhsSym.pkgID) && lhsSym.owner.name.equals(rhsSym.owner.name); } else if (Symbols.isPublic(lhsSym)) { return Symbols.isPublic(rhsSym); } return !Symbols.isPrivate(rhsSym) && !Symbols.isPublic(rhsSym) && lhsSym.pkgID.equals(rhsSym.pkgID); } private boolean isAssignableToUnionType(BType source, BType target, Set<TypePair> unresolvedTypes) { TypePair pair = new TypePair(source, target); if (unresolvedTypes.contains(pair)) { return true; } if (source.tag == TypeTags.UNION && ((BUnionType) source).isCyclic) { unresolvedTypes.add(pair); } Set<BType> sourceTypes = new LinkedHashSet<>(); Set<BType> targetTypes = new LinkedHashSet<>(); if (source.tag == TypeTags.UNION || source.tag == TypeTags.JSON || source.tag == TypeTags.ANYDATA) { sourceTypes.addAll(getEffectiveMemberTypes((BUnionType) source)); } else { sourceTypes.add(source); } boolean targetIsAUnion = false; if (target.tag == TypeTags.UNION) { targetIsAUnion = true; targetTypes.addAll(getEffectiveMemberTypes((BUnionType) target)); } else { targetTypes.add(target); } var sourceIterator = sourceTypes.iterator(); while (sourceIterator.hasNext()) { BType sMember = sourceIterator.next(); if (sMember.tag == TypeTags.NEVER) { sourceIterator.remove(); continue; } if (sMember.tag == TypeTags.FINITE && isAssignable(sMember, target, unresolvedTypes)) { sourceIterator.remove(); continue; } if (sMember.tag == TypeTags.XML && isAssignableToUnionType(expandedXMLBuiltinSubtypes, target, unresolvedTypes)) { sourceIterator.remove(); continue; } if (!isValueType(sMember)) { if (!targetIsAUnion) { continue; } BUnionType targetUnion = (BUnionType) target; if (sMember instanceof BUnionType) { BUnionType sUnion = (BUnionType) 
sMember; if (sUnion.isCyclic && targetUnion.isCyclic) { unresolvedTypes.add(new TypePair(sUnion, targetUnion)); if (isAssignable(sUnion, targetUnion, unresolvedTypes)) { sourceIterator.remove(); continue; } } if (sMember.tag == TypeTags.JSON && isAssignable(sUnion, targetUnion, unresolvedTypes)) { sourceIterator.remove(); continue; } } if (sMember.tag == TypeTags.READONLY) { unresolvedTypes.add(new TypePair(sMember, targetUnion)); if (isAssignable(sMember, targetUnion, unresolvedTypes)) { sourceIterator.remove(); continue; } } continue; } boolean sourceTypeIsNotAssignableToAnyTargetType = true; var targetIterator = targetTypes.iterator(); while (targetIterator.hasNext()) { BType t = targetIterator.next(); if (isAssignable(sMember, t, unresolvedTypes)) { sourceIterator.remove(); sourceTypeIsNotAssignableToAnyTargetType = false; break; } } if (sourceTypeIsNotAssignableToAnyTargetType) { return false; } } sourceIterator = sourceTypes.iterator(); while (sourceIterator.hasNext()) { BType sourceMember = sourceIterator.next(); boolean sourceTypeIsNotAssignableToAnyTargetType = true; var targetIterator = targetTypes.iterator(); boolean selfReferencedSource = (sourceMember != source) && isSelfReferencedStructuredType(source, sourceMember); while (targetIterator.hasNext()) { BType targetMember = targetIterator.next(); boolean selfReferencedTarget = isSelfReferencedStructuredType(target, targetMember); if (selfReferencedTarget && selfReferencedSource && (sourceMember.tag == targetMember.tag)) { sourceTypeIsNotAssignableToAnyTargetType = false; break; } if (isAssignable(sourceMember, targetMember, unresolvedTypes)) { sourceTypeIsNotAssignableToAnyTargetType = false; break; } } if (sourceTypeIsNotAssignableToAnyTargetType) { return false; } } unresolvedTypes.add(pair); return true; } public boolean isSelfReferencedStructuredType(BType source, BType s) { if (source == s) { return true; } if (s.tag == TypeTags.ARRAY) { return isSelfReferencedStructuredType(source, ((BArrayType) s).eType); } if (s.tag == TypeTags.MAP) { return isSelfReferencedStructuredType(source, ((BMapType) s).constraint); } if (s.tag == TypeTags.TABLE) { return isSelfReferencedStructuredType(source, ((BTableType) s).constraint); } return false; } public BType updateSelfReferencedWithNewType(BType source, BType s, BType target) { if (s.tag == TypeTags.ARRAY) { BArrayType arrayType = (BArrayType) s; if (arrayType.eType == source) { return new BArrayType(target, arrayType.tsymbol, arrayType.size, arrayType.state, arrayType.flags); } } if (s.tag == TypeTags.MAP) { BMapType mapType = (BMapType) s; if (mapType.constraint == source) { return new BMapType(mapType.tag, target, mapType.tsymbol, mapType.flags); } } if (s.tag == TypeTags.TABLE) { BTableType tableType = (BTableType) s; if (tableType.constraint == source) { return new BTableType(tableType.tag, target, tableType.tsymbol, tableType.flags); } else if (tableType.constraint instanceof BMapType) { return updateSelfReferencedWithNewType(source, (BMapType) tableType.constraint, target); } } return s; } public static void fixSelfReferencingSameUnion(BType originalMemberType, BUnionType origUnionType, BType immutableMemberType, BUnionType newImmutableUnion, LinkedHashSet<BType> readOnlyMemTypes) { boolean sameMember = originalMemberType == immutableMemberType; if (originalMemberType.tag == TypeTags.ARRAY) { var arrayType = (BArrayType) originalMemberType; if (origUnionType == arrayType.eType) { if (sameMember) { BArrayType newArrayType = new BArrayType(newImmutableUnion, arrayType.tsymbol, 
arrayType.size, arrayType.state, arrayType.flags); readOnlyMemTypes.add(newArrayType); } else { ((BArrayType) immutableMemberType).eType = newImmutableUnion; readOnlyMemTypes.add(immutableMemberType); } } } else if (originalMemberType.tag == TypeTags.MAP) { var mapType = (BMapType) originalMemberType; if (origUnionType == mapType.constraint) { if (sameMember) { BMapType newMapType = new BMapType(mapType.tag, newImmutableUnion, mapType.tsymbol, mapType.flags); readOnlyMemTypes.add(newMapType); } else { ((BMapType) immutableMemberType).constraint = newImmutableUnion; readOnlyMemTypes.add(immutableMemberType); } } } else if (originalMemberType.tag == TypeTags.TABLE) { var tableType = (BTableType) originalMemberType; if (origUnionType == tableType.constraint) { if (sameMember) { BTableType newTableType = new BTableType(tableType.tag, newImmutableUnion, tableType.tsymbol, tableType.flags); readOnlyMemTypes.add(newTableType); } else { ((BTableType) immutableMemberType).constraint = newImmutableUnion; readOnlyMemTypes.add(immutableMemberType); } return; } var immutableConstraint = ((BTableType) immutableMemberType).constraint; if (tableType.constraint.tag == TypeTags.MAP) { sameMember = tableType.constraint == immutableConstraint; var mapType = (BMapType) tableType.constraint; if (origUnionType == mapType.constraint) { if (sameMember) { BMapType newMapType = new BMapType(mapType.tag, newImmutableUnion, mapType.tsymbol, mapType.flags); ((BTableType) immutableMemberType).constraint = newMapType; } else { ((BTableType) immutableMemberType).constraint = newImmutableUnion; } readOnlyMemTypes.add(immutableMemberType); } } } else { readOnlyMemTypes.add(immutableMemberType); } } private Set<BType> getEffectiveMemberTypes(BUnionType unionType) { Set<BType> memTypes = new LinkedHashSet<>(); for (BType memberType : unionType.getMemberTypes()) { switch (memberType.tag) { case TypeTags.INTERSECTION: BType effectiveType = ((BIntersectionType) memberType).effectiveType; if (effectiveType.tag == TypeTags.UNION) { memTypes.addAll(getEffectiveMemberTypes((BUnionType) effectiveType)); continue; } memTypes.add(effectiveType); break; case TypeTags.UNION: memTypes.addAll(getEffectiveMemberTypes((BUnionType) memberType)); break; case TypeTags.TYPEREFDESC: BType constraint = getReferredType(memberType); if (constraint.tag == TypeTags.UNION) { memTypes.addAll(getEffectiveMemberTypes((BUnionType) constraint)); continue; } memTypes.add(constraint); break; default: memTypes.add(memberType); break; } } return memTypes; } private boolean isFiniteTypeAssignable(BFiniteType finiteType, BType targetType, Set<TypePair> unresolvedTypes) { BType expType = getReferredType(targetType); if (expType.tag == TypeTags.FINITE) { return finiteType.getValueSpace().stream() .allMatch(expression -> isAssignableToFiniteType(expType, (BLangLiteral) expression)); } if (targetType.tag == TypeTags.UNION) { List<BType> unionMemberTypes = getAllTypes(targetType, true); return finiteType.getValueSpace().stream() .allMatch(valueExpr -> unionMemberTypes.stream() .anyMatch(targetMemType -> getReferredType(targetMemType).tag == TypeTags.FINITE ? 
isAssignableToFiniteType(getReferredType(targetMemType), (BLangLiteral) valueExpr) : isAssignable(valueExpr.getBType(), getReferredType(targetMemType), unresolvedTypes) || isLiteralCompatibleWithBuiltinTypeWithSubTypes( (BLangLiteral) valueExpr, getReferredType(targetMemType)))); } for (BLangExpression expression : finiteType.getValueSpace()) { if (!isLiteralCompatibleWithBuiltinTypeWithSubTypes((BLangLiteral) expression, targetType) && !isAssignable(expression.getBType(), expType, unresolvedTypes)) { return false; } } return true; } boolean isAssignableToFiniteType(BType type, BLangLiteral literalExpr) { type = getReferredType(type); if (type.tag != TypeTags.FINITE) { return false; } BFiniteType expType = (BFiniteType) type; return expType.getValueSpace().stream().anyMatch(memberLiteral -> { if (((BLangLiteral) memberLiteral).value == null) { return literalExpr.value == null; } return checkLiteralAssignabilityBasedOnType((BLangLiteral) memberLiteral, literalExpr); }); } /** * Method to check the literal assignability based on the types of the literals. For numeric literals the * assignability depends on the equivalency of the literals. The candidate literal can either be a simple * literal or a constant. In case of a constant, it is assignable to the base literal if and only if both * literals have the same type and equivalent values. * * @param baseLiteral Literal based on which we check the assignability. * @param candidateLiteral Literal to be tested whether it is assignable to the base literal or not. * @return true if assignable; false otherwise. */ boolean checkLiteralAssignabilityBasedOnType(BLangLiteral baseLiteral, BLangLiteral candidateLiteral) { if (baseLiteral.getKind() != candidateLiteral.getKind()) { return false; } Object baseValue = baseLiteral.value; Object candidateValue = candidateLiteral.value; int candidateTypeTag = candidateLiteral.getBType().tag; switch (baseLiteral.getBType().tag) { case TypeTags.BYTE: if (candidateTypeTag == TypeTags.BYTE || (candidateTypeTag == TypeTags.INT && !candidateLiteral.isConstant && isByteLiteralValue((Long) candidateValue))) { return ((Number) baseValue).longValue() == ((Number) candidateValue).longValue(); } break; case TypeTags.INT: if (candidateTypeTag == TypeTags.INT) { return ((Number) baseValue).longValue() == ((Number) candidateValue).longValue(); } break; case TypeTags.SIGNED32_INT: if (candidateTypeTag == TypeTags.INT && isSigned32LiteralValue((Long) candidateValue)) { return ((Number) baseValue).longValue() == ((Number) candidateValue).longValue(); } break; case TypeTags.SIGNED16_INT: if (candidateTypeTag == TypeTags.INT && isSigned16LiteralValue((Long) candidateValue)) { return ((Number) baseValue).longValue() == ((Number) candidateValue).longValue(); } break; case TypeTags.SIGNED8_INT: if (candidateTypeTag == TypeTags.INT && isSigned8LiteralValue((Long) candidateValue)) { return ((Number) baseValue).longValue() == ((Number) candidateValue).longValue(); } break; case TypeTags.UNSIGNED32_INT: if (candidateTypeTag == TypeTags.INT && isUnsigned32LiteralValue((Long) candidateValue)) { return ((Number) baseValue).longValue() == ((Number) candidateValue).longValue(); } break; case TypeTags.UNSIGNED16_INT: if (candidateTypeTag == TypeTags.INT && isUnsigned16LiteralValue((Long) candidateValue)) { return ((Number) baseValue).longValue() == ((Number) candidateValue).longValue(); } break; case TypeTags.UNSIGNED8_INT: if (candidateTypeTag == TypeTags.INT && isUnsigned8LiteralValue((Long) candidateValue)) { return ((Number)
baseValue).longValue() == ((Number) candidateValue).longValue(); } break; case TypeTags.FLOAT: String baseValueStr = String.valueOf(baseValue); String originalValue = baseLiteral.originalValue != null ? baseLiteral.originalValue : baseValueStr; if (NumericLiteralSupport.isDecimalDiscriminated(originalValue)) { return false; } double baseDoubleVal = Double.parseDouble(baseValueStr); double candidateDoubleVal; if (candidateTypeTag == TypeTags.INT && !candidateLiteral.isConstant) { candidateDoubleVal = ((Long) candidateValue).doubleValue(); return baseDoubleVal == candidateDoubleVal; } else if (candidateTypeTag == TypeTags.FLOAT) { candidateDoubleVal = Double.parseDouble(String.valueOf(candidateValue)); return baseDoubleVal == candidateDoubleVal; } break; case TypeTags.DECIMAL: BigDecimal baseDecimalVal = NumericLiteralSupport.parseBigDecimal(baseValue); BigDecimal candidateDecimalVal; if (candidateTypeTag == TypeTags.INT && !candidateLiteral.isConstant) { candidateDecimalVal = new BigDecimal((long) candidateValue, MathContext.DECIMAL128); return baseDecimalVal.compareTo(candidateDecimalVal) == 0; } else if (candidateTypeTag == TypeTags.FLOAT && !candidateLiteral.isConstant || candidateTypeTag == TypeTags.DECIMAL) { if (NumericLiteralSupport.isFloatDiscriminated(String.valueOf(candidateValue))) { return false; } candidateDecimalVal = NumericLiteralSupport.parseBigDecimal(candidateValue); return baseDecimalVal.compareTo(candidateDecimalVal) == 0; } break; default: return baseValue.equals(candidateValue); } return false; } boolean isByteLiteralValue(Long longObject) { return (longObject.intValue() >= BBYTE_MIN_VALUE && longObject.intValue() <= BBYTE_MAX_VALUE); } boolean isSigned32LiteralValue(Long longObject) { return (longObject >= SIGNED32_MIN_VALUE && longObject <= SIGNED32_MAX_VALUE); } boolean isSigned16LiteralValue(Long longObject) { return (longObject.intValue() >= SIGNED16_MIN_VALUE && longObject.intValue() <= SIGNED16_MAX_VALUE); } boolean isSigned8LiteralValue(Long longObject) { return (longObject.intValue() >= SIGNED8_MIN_VALUE && longObject.intValue() <= SIGNED8_MAX_VALUE); } boolean isUnsigned32LiteralValue(Long longObject) { return (longObject >= 0 && longObject <= UNSIGNED32_MAX_VALUE); } boolean isUnsigned16LiteralValue(Long longObject) { return (longObject.intValue() >= 0 && longObject.intValue() <= UNSIGNED16_MAX_VALUE); } boolean isUnsigned8LiteralValue(Long longObject) { return (longObject.intValue() >= 0 && longObject.intValue() <= UNSIGNED8_MAX_VALUE); } boolean isCharLiteralValue(String literal) { return (literal.codePoints().count() == 1); } /** * Method to retrieve a type representing all the values in the value space of a finite type that are assignable to * the target type. 
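* e.g., for a finite type with value space 1|2.0|"s" and the target type int, a new finite type containing only 1 is returned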
* * @param finiteType the finite type * @param targetType the target type * @return a new finite type if at least one value in the value space of the specified finiteType is * assignable to targetType (the same if all are assignable), else semanticError */ BType getTypeForFiniteTypeValuesAssignableToType(BFiniteType finiteType, BType targetType) { if (isAssignable(finiteType, targetType)) { return finiteType; } Set<BLangExpression> matchingValues = new HashSet<>(); for (BLangExpression expr : finiteType.getValueSpace()) { BLangLiteral literal = (BLangLiteral) expr; if (isAssignable(expr.getBType(), targetType) || isAssignableToFiniteType(targetType, literal) || isAssignableToFiniteTypeMemberInUnion(literal, targetType) || isAssignableToBuiltinSubtypeInTargetType(literal, targetType)) { matchingValues.add(expr); } } if (matchingValues.isEmpty()) { return symTable.semanticError; } BTypeSymbol finiteTypeSymbol = Symbols.createTypeSymbol(SymTag.FINITE_TYPE, finiteType.tsymbol.flags, names.fromString("$anonType$" + UNDERSCORE + finiteTypeCount++), finiteType.tsymbol.pkgID, null, finiteType.tsymbol.owner, finiteType.tsymbol.pos, VIRTUAL); BFiniteType intersectingFiniteType = new BFiniteType(finiteTypeSymbol, matchingValues); finiteTypeSymbol.type = intersectingFiniteType; return intersectingFiniteType; } private boolean isAssignableToFiniteTypeMemberInUnion(BLangLiteral expr, BType targetType) { if (targetType.tag != TypeTags.UNION) { return false; } for (BType memType : ((BUnionType) targetType).getMemberTypes()) { if (isAssignableToFiniteType(memType, expr)) { return true; } } return false; } private boolean isAssignableToBuiltinSubtypeInTargetType(BLangLiteral literal, BType targetType) { if (targetType.tag == TypeTags.UNION) { for (BType memberType : ((BUnionType) targetType).getMemberTypes()) { if (isLiteralCompatibleWithBuiltinTypeWithSubTypes(literal, memberType)) { return true; } } } return isLiteralCompatibleWithBuiltinTypeWithSubTypes(literal, targetType); } public boolean isLiteralCompatibleWithBuiltinTypeWithSubTypes(BLangLiteral literal, BType targetType) { BType literalType = literal.getBType(); if (literalType.tag == targetType.tag) { return true; } switch (targetType.tag) { case TypeTags.BYTE: return literalType.tag == TypeTags.INT && isByteLiteralValue((Long) literal.value); case TypeTags.SIGNED32_INT: return literalType.tag == TypeTags.INT && isSigned32LiteralValue((Long) literal.value); case TypeTags.SIGNED16_INT: return literalType.tag == TypeTags.INT && isSigned16LiteralValue((Long) literal.value); case TypeTags.SIGNED8_INT: return literalType.tag == TypeTags.INT && isSigned8LiteralValue((Long) literal.value); case TypeTags.UNSIGNED32_INT: return literalType.tag == TypeTags.INT && isUnsigned32LiteralValue((Long) literal.value); case TypeTags.UNSIGNED16_INT: return literalType.tag == TypeTags.INT && isUnsigned16LiteralValue((Long) literal.value); case TypeTags.UNSIGNED8_INT: return literalType.tag == TypeTags.INT && isUnsigned8LiteralValue((Long) literal.value); case TypeTags.CHAR_STRING: return literalType.tag == TypeTags.STRING && isCharLiteralValue((String) literal.value); default: return false; } } /** * Method to retrieve a type representing all the member types of a union type that are assignable to * the target type. 
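* e.g., for the union type int|string|error and the target type anydata, the result is the union int|string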
* * @param unionType the union type * @param targetType the target type * @param intersectionContext * @param visitedTypes cache to capture visited types * @return a single type or a new union type if at least one member type of the union type is * assignable to targetType, else semanticError */ BType getTypeForUnionTypeMembersAssignableToType(BUnionType unionType, BType targetType, SymbolEnv env, IntersectionContext intersectionContext, LinkedHashSet<BType> visitedTypes) { List<BType> intersection = new LinkedList<>(); if (!visitedTypes.add(unionType)) { return unionType; } unionType.getMemberTypes().forEach(memType -> { BType memberIntersectionType = getTypeIntersection(intersectionContext, memType, targetType, env, visitedTypes); if (memberIntersectionType != symTable.semanticError) { intersection.add(memberIntersectionType); } }); if (intersection.isEmpty()) { return symTable.semanticError; } if (intersection.size() == 1) { return intersection.get(0); } else { return BUnionType.create(null, new LinkedHashSet<>(intersection)); } } boolean validEqualityIntersectionExists(BType lhsType, BType rhsType) { if (!isAnydata(lhsType) && !isAnydata(rhsType)) { return false; } if (isAssignable(lhsType, rhsType) || isAssignable(rhsType, lhsType)) { return true; } Set<BType> lhsTypes = expandAndGetMemberTypesRecursive(lhsType); Set<BType> rhsTypes = expandAndGetMemberTypesRecursive(rhsType); return equalityIntersectionExists(lhsTypes, rhsTypes); } private boolean equalityIntersectionExists(Set<BType> lhsTypes, Set<BType> rhsTypes) { if ((lhsTypes.contains(symTable.anydataType) && rhsTypes.stream().anyMatch(type -> type.tag != TypeTags.ERROR)) || (rhsTypes.contains(symTable.anydataType) && lhsTypes.stream().anyMatch(type -> type.tag != TypeTags.ERROR))) { return true; } boolean matchFound = false; for (BType lhsType : lhsTypes) { for (BType rhsType : rhsTypes) { if (isAssignable(lhsType, rhsType) || isAssignable(rhsType, lhsType)) { matchFound = true; break; } } if (matchFound) { break; } } if (!matchFound) { matchFound = equalityIntersectionExistsForComplexTypes(lhsTypes, rhsTypes); } return matchFound; } boolean validNumericTypeExists(BType type) { if (isBasicNumericType(type)) { return true; } switch (type.tag) { case TypeTags.UNION: BUnionType unionType = (BUnionType) type; Set<BType> memberTypes = unionType.getMemberTypes(); BType firstTypeInUnion = getReferredType(memberTypes.iterator().next()); if (firstTypeInUnion.tag == TypeTags.FINITE) { Set<BLangExpression> valSpace = ((BFiniteType) firstTypeInUnion).getValueSpace(); BType baseExprType = valSpace.iterator().next().getBType(); for (BType memType : memberTypes) { if (memType.tag == TypeTags.FINITE) { if (!checkValueSpaceHasSameType((BFiniteType) memType, baseExprType)) { return false; } } if (!checkValidNumericTypesInUnion(memType, firstTypeInUnion.tag)) { return false; } } } else { for (BType memType : memberTypes) { memType = getReferredType(memType); if (!checkValidNumericTypesInUnion(memType, firstTypeInUnion.tag)) { return false; } } } return true; case TypeTags.FINITE: Set<BLangExpression> valSpace = ((BFiniteType) type).getValueSpace(); BType baseExprType = valSpace.iterator().next().getBType(); for (BLangExpression expr : valSpace) { if (!checkValueSpaceHasSameType((BFiniteType) type, baseExprType)) { return false; } if (!validNumericTypeExists(expr.getBType())) { return false; } } return true; case TypeTags.TYPEREFDESC: return validNumericTypeExists(getReferredType(type)); default: return false; } } private boolean 
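/* a union member is acceptable only if it has the same numeric kind as the first member; all int subtypes and byte count as one kind */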
checkValidNumericTypesInUnion(BType memType, int firstTypeTag) { if (memType.tag != firstTypeTag && !checkTypesBelongToInt(memType.tag, firstTypeTag)) { return false; } return validNumericTypeExists(memType); } private boolean checkTypesBelongToInt(int firstTypeTag, int secondTypeTag) { return ((TypeTags.isIntegerTypeTag(firstTypeTag) || firstTypeTag == TypeTags.BYTE) && (TypeTags.isIntegerTypeTag(secondTypeTag) || secondTypeTag == TypeTags.BYTE)); } boolean validIntegerTypeExists(BType input) { BType type = getReferredType(input); if (TypeTags.isIntegerTypeTag(type.tag)) { return true; } switch (type.tag) { case TypeTags.BYTE: return true; case TypeTags.UNION: LinkedHashSet<BType> memberTypes = ((BUnionType) type).getMemberTypes(); for (BType memberType : memberTypes) { memberType = getReferredType(memberType); if (!validIntegerTypeExists(memberType)) { return false; } } return true; case TypeTags.FINITE: Set<BLangExpression> valueSpace = ((BFiniteType) type).getValueSpace(); for (BLangExpression expr : valueSpace) { if (!validIntegerTypeExists(expr.getBType())) { return false; } } return true; default: return false; } } boolean validStringOrXmlTypeExists(BType bType) { BType type = getReferredType(bType); if (TypeTags.isStringTypeTag(type.tag)) { return true; } switch (type.tag) { case TypeTags.XML: case TypeTags.XML_TEXT: return true; case TypeTags.UNION: BUnionType unionType = (BUnionType) type; Set<BType> memberTypes = unionType.getMemberTypes(); BType firstTypeInUnion = getReferredType(memberTypes.iterator().next()); if (firstTypeInUnion.tag == TypeTags.FINITE) { Set<BLangExpression> valSpace = ((BFiniteType) firstTypeInUnion).getValueSpace(); BType baseExprType = valSpace.iterator().next().getBType(); for (BType memType : memberTypes) { memType = getReferredType(memType); if (memType.tag == TypeTags.FINITE) { if (!checkValueSpaceHasSameType((BFiniteType) memType, baseExprType)) { return false; } } if (!checkValidStringOrXmlTypesInUnion(memType, firstTypeInUnion.tag)) { return false; } } } else { for (BType memType : memberTypes) { memType = getReferredType(memType); if (!checkValidStringOrXmlTypesInUnion(memType, firstTypeInUnion.tag)) { return false; } } } return true; case TypeTags.FINITE: Set<BLangExpression> valSpace = ((BFiniteType) type).getValueSpace(); BType baseExprType = valSpace.iterator().next().getBType(); for (BLangExpression expr : valSpace) { if (!checkValueSpaceHasSameType((BFiniteType) type, baseExprType)) { return false; } if (!validStringOrXmlTypeExists(expr.getBType())) { return false; } } return true; default: return false; } } private boolean checkValidStringOrXmlTypesInUnion(BType memType, int firstTypeTag) { if (memType.tag != firstTypeTag && !checkTypesBelongToStringOrXml(memType.tag, firstTypeTag)) { return false; } return validStringOrXmlTypeExists(memType); } private boolean checkTypesBelongToStringOrXml(int firstTypeTag, int secondTypeTag) { return (TypeTags.isStringTypeTag(firstTypeTag) && TypeTags.isStringTypeTag(secondTypeTag)) || (TypeTags.isXMLTypeTag(firstTypeTag) && TypeTags.isXMLTypeTag(secondTypeTag)); } public boolean checkTypeContainString(BType type) { if (TypeTags.isStringTypeTag(type.tag)) { return true; } switch (type.tag) { case TypeTags.UNION: for (BType memType : ((BUnionType) type).getMemberTypes()) { if (!checkTypeContainString(memType)) { return false; } } return true; case TypeTags.FINITE: Set<BLangExpression> valSpace = ((BFiniteType) type).getValueSpace(); for (BLangExpression expr : valSpace) { if 
(!checkTypeContainString(expr.getBType())) { return false; } } return true; case TypeTags.TYPEREFDESC: return checkTypeContainString(getReferredType(type)); default: return false; } } /** * Retrieves member types of the specified type, expanding maps/arrays of/constrained by union types to individual * maps/arrays. * * e.g., (string|int)[] would cause three entries as string[], int[], (string|int)[] * * @param bType the type for which member types need to be identified * @return a set containing all the retrieved member types */ public Set<BType> expandAndGetMemberTypesRecursive(BType bType) { HashSet<BType> visited = new HashSet<>(); return expandAndGetMemberTypesRecursiveHelper(bType, visited); } private Set<BType> expandAndGetMemberTypesRecursiveHelper(BType bType, HashSet<BType> visited) { Set<BType> memberTypes = new LinkedHashSet<>(); switch (bType.tag) { case TypeTags.BYTE: case TypeTags.INT: memberTypes.add(symTable.intType); memberTypes.add(symTable.byteType); break; case TypeTags.FINITE: BFiniteType expType = (BFiniteType) bType; expType.getValueSpace().forEach(value -> { memberTypes.add(value.getBType()); }); break; case TypeTags.UNION: BUnionType unionType = (BUnionType) bType; if (!visited.add(unionType)) { return memberTypes; } unionType.getMemberTypes().forEach(member -> { memberTypes.addAll(expandAndGetMemberTypesRecursiveHelper(member, visited)); }); break; case TypeTags.ARRAY: BType arrayElementType = ((BArrayType) bType).getElementType(); if (((BArrayType) bType).getSize() != -1) { memberTypes.add(new BArrayType(arrayElementType)); } if (arrayElementType.tag == TypeTags.UNION) { Set<BType> elementUnionTypes = expandAndGetMemberTypesRecursiveHelper(arrayElementType, visited); elementUnionTypes.forEach(elementUnionType -> { memberTypes.add(new BArrayType(elementUnionType)); }); } memberTypes.add(bType); break; case TypeTags.MAP: BType mapConstraintType = ((BMapType) bType).getConstraint(); if (mapConstraintType.tag == TypeTags.UNION) { Set<BType> constraintUnionTypes = expandAndGetMemberTypesRecursiveHelper(mapConstraintType, visited); constraintUnionTypes.forEach(constraintUnionType -> { memberTypes.add(new BMapType(TypeTags.MAP, constraintUnionType, symTable.mapType.tsymbol)); }); } memberTypes.add(bType); break; case TypeTags.TYPEREFDESC: return expandAndGetMemberTypesRecursiveHelper(getReferredType(bType), visited); default: memberTypes.add(bType); } return memberTypes; } private boolean tupleIntersectionExists(BTupleType lhsType, BTupleType rhsType) { if (lhsType.getTupleTypes().size() != rhsType.getTupleTypes().size()) { return false; } List<BType> lhsMemberTypes = lhsType.getTupleTypes(); List<BType> rhsMemberTypes = rhsType.getTupleTypes(); for (int i = 0; i < lhsType.getTupleTypes().size(); i++) { if (!equalityIntersectionExists(expandAndGetMemberTypesRecursive(lhsMemberTypes.get(i)), expandAndGetMemberTypesRecursive(rhsMemberTypes.get(i)))) { return false; } } return true; } private boolean equalityIntersectionExistsForComplexTypes(Set<BType> lhsTypes, Set<BType> rhsTypes) { for (BType lhsMemberType : lhsTypes) { switch (lhsMemberType.tag) { case TypeTags.INT: case TypeTags.STRING: case TypeTags.FLOAT: case TypeTags.DECIMAL: case TypeTags.BOOLEAN: case TypeTags.NIL: if (rhsTypes.stream().anyMatch(rhsMemberType -> rhsMemberType.tag == TypeTags.JSON)) { return true; } break; case TypeTags.JSON: if (jsonEqualityIntersectionExists(rhsTypes)) { return true; } break; case TypeTags.TUPLE: if (rhsTypes.stream().anyMatch( rhsMemberType -> rhsMemberType.tag ==
TypeTags.TUPLE && tupleIntersectionExists((BTupleType) lhsMemberType, (BTupleType) rhsMemberType))) { return true; } if (rhsTypes.stream().anyMatch( rhsMemberType -> rhsMemberType.tag == TypeTags.ARRAY && arrayTupleEqualityIntersectionExists((BArrayType) rhsMemberType, (BTupleType) lhsMemberType))) { return true; } break; case TypeTags.ARRAY: if (rhsTypes.stream().anyMatch( rhsMemberType -> rhsMemberType.tag == TypeTags.ARRAY && equalityIntersectionExists( expandAndGetMemberTypesRecursive(((BArrayType) lhsMemberType).eType), expandAndGetMemberTypesRecursive(((BArrayType) rhsMemberType).eType)))) { return true; } if (rhsTypes.stream().anyMatch( rhsMemberType -> rhsMemberType.tag == TypeTags.TUPLE && arrayTupleEqualityIntersectionExists((BArrayType) lhsMemberType, (BTupleType) rhsMemberType))) { return true; } break; case TypeTags.MAP: if (rhsTypes.stream().anyMatch( rhsMemberType -> rhsMemberType.tag == TypeTags.MAP && equalityIntersectionExists( expandAndGetMemberTypesRecursive(((BMapType) lhsMemberType).constraint), expandAndGetMemberTypesRecursive(((BMapType) rhsMemberType).constraint)))) { return true; } if (!isAssignable(((BMapType) lhsMemberType).constraint, symTable.errorType) && rhsTypes.stream().anyMatch(rhsMemberType -> rhsMemberType.tag == TypeTags.JSON)) { return true; } if (rhsTypes.stream().anyMatch( rhsMemberType -> rhsMemberType.tag == TypeTags.RECORD && mapRecordEqualityIntersectionExists((BMapType) lhsMemberType, (BRecordType) rhsMemberType))) { return true; } break; case TypeTags.OBJECT: case TypeTags.RECORD: if (rhsTypes.stream().anyMatch( rhsMemberType -> checkStructEquivalency(rhsMemberType, lhsMemberType) || checkStructEquivalency(lhsMemberType, rhsMemberType))) { return true; } if (rhsTypes.stream().anyMatch( rhsMemberType -> rhsMemberType.tag == TypeTags.RECORD && recordEqualityIntersectionExists((BRecordType) lhsMemberType, (BRecordType) rhsMemberType))) { return true; } if (rhsTypes.stream().anyMatch(rhsMemberType -> rhsMemberType.tag == TypeTags.JSON) && jsonEqualityIntersectionExists(expandAndGetMemberTypesRecursive(lhsMemberType))) { return true; } if (rhsTypes.stream().anyMatch( rhsMemberType -> rhsMemberType.tag == TypeTags.MAP && mapRecordEqualityIntersectionExists((BMapType) rhsMemberType, (BRecordType) lhsMemberType))) { return true; } break; } } return false; } private boolean arrayTupleEqualityIntersectionExists(BArrayType arrayType, BTupleType tupleType) { Set<BType> elementTypes = expandAndGetMemberTypesRecursive(arrayType.eType); return tupleType.tupleTypes.stream() .allMatch(tupleMemType -> equalityIntersectionExists(elementTypes, expandAndGetMemberTypesRecursive(tupleMemType))); } private boolean recordEqualityIntersectionExists(BRecordType lhsType, BRecordType rhsType) { Map<String, BField> lhsFields = lhsType.fields; Map<String, BField> rhsFields = rhsType.fields; List<Name> matchedFieldNames = new ArrayList<>(); for (BField lhsField : lhsFields.values()) { if (rhsFields.containsKey(lhsField.name.value)) { if (!equalityIntersectionExists(expandAndGetMemberTypesRecursive(lhsField.type), expandAndGetMemberTypesRecursive( rhsFields.get(lhsField.name.value).type))) { return false; } matchedFieldNames.add(lhsField.getName()); } else { if (Symbols.isFlagOn(lhsField.symbol.flags, Flags.OPTIONAL)) { break; } if (rhsType.sealed) { return false; } if (!equalityIntersectionExists(expandAndGetMemberTypesRecursive(lhsField.type), expandAndGetMemberTypesRecursive(rhsType.restFieldType))) { return false; } } } for (BField rhsField : rhsFields.values()) { if 
(matchedFieldNames.contains(rhsField.getName())) { continue; } if (!Symbols.isFlagOn(rhsField.symbol.flags, Flags.OPTIONAL)) { if (lhsType.sealed) { return false; } if (!equalityIntersectionExists(expandAndGetMemberTypesRecursive(rhsField.type), expandAndGetMemberTypesRecursive(lhsType.restFieldType))) { return false; } } } return true; } private boolean mapRecordEqualityIntersectionExists(BMapType mapType, BRecordType recordType) { Set<BType> mapConstrTypes = expandAndGetMemberTypesRecursive(mapType.getConstraint()); for (BField field : recordType.fields.values()) { if (!Symbols.isFlagOn(field.symbol.flags, Flags.OPTIONAL) && !equalityIntersectionExists(mapConstrTypes, expandAndGetMemberTypesRecursive(field.type))) { return false; } } return true; } private boolean jsonEqualityIntersectionExists(Set<BType> typeSet) { for (BType type : typeSet) { switch (type.tag) { case TypeTags.MAP: if (!isAssignable(((BMapType) type).constraint, symTable.errorType)) { return true; } break; case TypeTags.RECORD: BRecordType recordType = (BRecordType) type; if (recordType.fields.values().stream() .allMatch(field -> Symbols.isFlagOn(field.symbol.flags, Flags.OPTIONAL) || !isAssignable(field.type, symTable.errorType))) { return true; } break; default: if (isAssignable(type, symTable.jsonType)) { return true; } } } return false; } public BType getRemainingMatchExprType(BType originalType, BType typeToRemove) { switch (originalType.tag) { case TypeTags.UNION: return getRemainingType((BUnionType) originalType, getAllTypes(typeToRemove, true)); case TypeTags.FINITE: return getRemainingType((BFiniteType) originalType, getAllTypes(typeToRemove, true)); case TypeTags.TUPLE: return getRemainingType((BTupleType) originalType, typeToRemove); default: return originalType; } } private BType getRemainingType(BTupleType originalType, BType typeToRemove) { switch (typeToRemove.tag) { case TypeTags.TUPLE: return getRemainingType(originalType, (BTupleType) typeToRemove); case TypeTags.ARRAY: return getRemainingType(originalType, (BArrayType) typeToRemove); default: return originalType; } } private BType getRemainingType(BTupleType originalType, BTupleType typeToRemove) { if (originalType.restType != null) { return originalType; } List<BType> originalTupleTypes = new ArrayList<>(originalType.tupleTypes); List<BType> typesToRemove = new ArrayList<>(typeToRemove.tupleTypes); if (originalTupleTypes.size() < typesToRemove.size()) { return originalType; } List<BType> tupleTypes = new ArrayList<>(); for (int i = 0; i < originalTupleTypes.size(); i++) { tupleTypes.add(getRemainingMatchExprType(originalTupleTypes.get(i), typesToRemove.get(i))); } if (typeToRemove.restType == null) { return new BTupleType(tupleTypes); } if (originalTupleTypes.size() == typesToRemove.size()) { return originalType; } for (int i = typesToRemove.size(); i < originalTupleTypes.size(); i++) { tupleTypes.add(getRemainingMatchExprType(originalTupleTypes.get(i), typeToRemove.restType)); } return new BTupleType(tupleTypes); } private BType getRemainingType(BTupleType originalType, BArrayType typeToRemove) { BType eType = typeToRemove.eType; List<BType> tupleTypes = new ArrayList<>(); for (BType tupleType : originalType.tupleTypes) { tupleTypes.add(getRemainingMatchExprType(tupleType, eType)); } BTupleType remainingType = new BTupleType(tupleTypes); if (originalType.restType != null) { remainingType.restType = getRemainingMatchExprType(originalType.restType, eType); } return remainingType; } public BType getRemainingType(BType originalType, BType typeToRemove) 
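/* Illustrative note (added; not part of the original source): getRemainingType computes the type
   that is left once the tested types are removed, which is what powers type narrowing. A minimal
   Ballerina sketch, assuming standard narrowing semantics:

       function describe(int|string|boolean v) returns string {
           if v is int {
               return "int"; // v is narrowed to int here
           }
           // remaining type of v in this branch: string|boolean
           return v is string ? "string" : "boolean";
       } */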
{ switch (originalType.tag) { case TypeTags.UNION: return getRemainingType((BUnionType) originalType, getAllTypes(typeToRemove, true)); case TypeTags.FINITE: return getRemainingType((BFiniteType) originalType, getAllTypes(typeToRemove, true)); case TypeTags.READONLY: return getRemainingType((BReadonlyType) originalType, typeToRemove); case TypeTags.TYPEREFDESC: return getRemainingType(((BTypeReferenceType) originalType).referredType, typeToRemove); default: return originalType; } } private BType getRemainingType(BReadonlyType originalType, BType removeType) { if (removeType.tag == TypeTags.ERROR) { return symTable.anyAndReadonly; } return originalType; } public BType getTypeIntersection(IntersectionContext intersectionContext, BType lhsType, BType rhsType, SymbolEnv env) { return getTypeIntersection(intersectionContext, lhsType, rhsType, env, new LinkedHashSet<>()); } private BType getTypeIntersection(IntersectionContext intersectionContext, BType lhsType, BType rhsType, SymbolEnv env, LinkedHashSet<BType> visitedTypes) { List<BType> rhsTypeComponents = getAllTypes(rhsType, false); LinkedHashSet<BType> intersection = new LinkedHashSet<>(rhsTypeComponents.size()); for (BType rhsComponent : rhsTypeComponents) { BType it = getIntersection(intersectionContext, lhsType, env, rhsComponent, new LinkedHashSet<>(visitedTypes)); if (it != null) { intersection.add(it); } } if (intersection.isEmpty()) { if (lhsType.tag == TypeTags.NULL_SET) { return lhsType; } return symTable.semanticError; } if (intersection.size() == 1) { return intersection.toArray(new BType[0])[0]; } else { return BUnionType.create(null, intersection); } } private BType getIntersection(IntersectionContext intersectionContext, BType lhsType, SymbolEnv env, BType type, LinkedHashSet<BType> visitedTypes) { lhsType = getEffectiveTypeForIntersection(lhsType); type = getEffectiveTypeForIntersection(type); if (intersectionContext.preferNonGenerativeIntersection) { if (isAssignable(type, lhsType)) { return type; } else if (isAssignable(lhsType, type)) { return lhsType; } } type = getReferredType(type); lhsType = getReferredType(lhsType); if (type.tag == TypeTags.ERROR && lhsType.tag == TypeTags.ERROR) { BType intersectionType = getIntersectionForErrorTypes(intersectionContext, lhsType, type, env, visitedTypes); if (intersectionType != symTable.semanticError) { return intersectionType; } } else if (type.tag == TypeTags.RECORD && lhsType.tag == TypeTags.RECORD) { BType intersectionType = createRecordIntersection(intersectionContext, (BRecordType) lhsType, (BRecordType) type, env, visitedTypes); if (intersectionType != symTable.semanticError) { return intersectionType; } } else if (type.tag == TypeTags.MAP && lhsType.tag == TypeTags.RECORD) { BType intersectionType = createRecordIntersection(intersectionContext, (BRecordType) lhsType, getEquivalentRecordType((BMapType) type), env, visitedTypes); if (intersectionType != symTable.semanticError) { return intersectionType; } } else if (type.tag == TypeTags.RECORD && lhsType.tag == TypeTags.MAP) { BType intersectionType = createRecordIntersection(intersectionContext, getEquivalentRecordType((BMapType) lhsType), (BRecordType) type, env, visitedTypes); if (intersectionType != symTable.semanticError) { return intersectionType; } } else if (!intersectionContext.preferNonGenerativeIntersection && isAssignable(type, lhsType)) { return type; } else if (!intersectionContext.preferNonGenerativeIntersection && isAssignable(lhsType, type)) { return lhsType; } else if (lhsType.tag == TypeTags.FINITE) { BType 
intersectionType = getTypeForFiniteTypeValuesAssignableToType((BFiniteType) lhsType, type); if (intersectionType != symTable.semanticError) { return intersectionType; } } else if (type.tag == TypeTags.FINITE) { BType intersectionType = getTypeForFiniteTypeValuesAssignableToType((BFiniteType) type, lhsType); if (intersectionType != symTable.semanticError) { return intersectionType; } } else if (lhsType.tag == TypeTags.UNION) { BType intersectionType = getTypeForUnionTypeMembersAssignableToType((BUnionType) lhsType, type, env, intersectionContext, visitedTypes); if (intersectionType != symTable.semanticError) { return intersectionType; } } else if (type.tag == TypeTags.UNION) { BType intersectionType = getTypeForUnionTypeMembersAssignableToType((BUnionType) type, lhsType, env, intersectionContext, visitedTypes); if (intersectionType != symTable.semanticError) { return intersectionType; } } else if (type.tag == TypeTags.MAP && lhsType.tag == TypeTags.MAP) { BType intersectionConstraintTypeType = getIntersection(intersectionContext, ((BMapType) lhsType).constraint, env, ((BMapType) type).constraint, visitedTypes); if (intersectionConstraintTypeType == null || intersectionConstraintTypeType == symTable.semanticError) { return null; } return new BMapType(TypeTags.MAP, intersectionConstraintTypeType, null); } else if (type.tag == TypeTags.ARRAY && lhsType.tag == TypeTags.TUPLE) { BType intersectionType = createArrayAndTupleIntersection(intersectionContext, (BArrayType) type, (BTupleType) lhsType, env, visitedTypes); if (intersectionType != symTable.semanticError) { return intersectionType; } } else if (type.tag == TypeTags.TUPLE && lhsType.tag == TypeTags.ARRAY) { BType intersectionType = createArrayAndTupleIntersection(intersectionContext, (BArrayType) lhsType, (BTupleType) type, env, visitedTypes); if (intersectionType != symTable.semanticError) { return intersectionType; } } else if (type.tag == TypeTags.TUPLE && lhsType.tag == TypeTags.TUPLE) { BType intersectionType = createTupleAndTupleIntersection(intersectionContext, (BTupleType) lhsType, (BTupleType) type, env, visitedTypes); if (intersectionType != symTable.semanticError) { return intersectionType; } } else if (isAnydataOrJson(type) && lhsType.tag == TypeTags.RECORD) { BType intersectionType = createRecordIntersection(intersectionContext, (BRecordType) lhsType, getEquivalentRecordType(getMapTypeForAnydataOrJson(type)), env, visitedTypes); if (intersectionType != symTable.semanticError) { return intersectionType; } } else if (type.tag == TypeTags.RECORD && isAnydataOrJson(lhsType)) { BType intersectionType = createRecordIntersection(intersectionContext, getEquivalentRecordType(getMapTypeForAnydataOrJson(lhsType)), (BRecordType) type, env, visitedTypes); if (intersectionType != symTable.semanticError) { return intersectionType; } } else if (isAnydataOrJson(type) && lhsType.tag == TypeTags.MAP) { return getIntersection(intersectionContext, lhsType, env, getMapTypeForAnydataOrJson(type), visitedTypes); } else if (type.tag == TypeTags.MAP && isAnydataOrJson(lhsType)) { return getIntersection(intersectionContext, getMapTypeForAnydataOrJson(lhsType), env, type, visitedTypes); } else if (isAnydataOrJson(type) && lhsType.tag == TypeTags.TUPLE) { BType intersectionType = createArrayAndTupleIntersection(intersectionContext, getArrayTypeForAnydataOrJson(type), (BTupleType) lhsType, env, visitedTypes); if (intersectionType != symTable.semanticError) { return intersectionType; } } else if (type.tag == TypeTags.TUPLE && isAnydataOrJson(lhsType)) { 
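/* Illustrative note (added; not part of the original source): getIntersection dispatches pairwise
   on the operand shapes (record/record, map/record, tuple/array, finite/union, ...). In Ballerina
   source this is what gives an intersection type its effective type; a minimal sketch, assuming
   standard readonly-intersection semantics:

       type Pair record {| int x; int y; |};
       type ROPair Pair & readonly; // effective type: a closed record with readonly fields

       ROPair p = {x: 1, y: 2}; */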
BType intersectionType = createArrayAndTupleIntersection(intersectionContext, getArrayTypeForAnydataOrJson(lhsType), (BTupleType) type, env, visitedTypes); if (intersectionType != symTable.semanticError) { return intersectionType; } } else if (isAnydataOrJson(type) && lhsType.tag == TypeTags.ARRAY) { BType elementIntersection = getIntersection(intersectionContext, ((BArrayType) lhsType).eType, env, type, visitedTypes); if (elementIntersection == null) { return elementIntersection; } return new BArrayType(elementIntersection); } else if (type.tag == TypeTags.ARRAY && isAnydataOrJson(lhsType)) { BType elementIntersection = getIntersection(intersectionContext, lhsType, env, ((BArrayType) type).eType, visitedTypes); if (elementIntersection == null) { return elementIntersection; } return new BArrayType(elementIntersection); } else if (type.tag == TypeTags.NULL_SET) { return type; } return null; } private BType getEffectiveTypeForIntersection(BType bType) { BType type = getReferredType(bType); if (type.tag != TypeTags.INTERSECTION) { return bType; } BType effectiveType = ((BIntersectionType) type).effectiveType; return effectiveType.tag == TypeTags.UNION && ((BUnionType) effectiveType).isCyclic ? type : effectiveType; } private boolean isAnydataOrJson(BType type) { switch (type.tag) { case TypeTags.ANYDATA: case TypeTags.JSON: return true; } return false; } private BMapType getMapTypeForAnydataOrJson(BType type) { BMapType mapType = type.tag == TypeTags.ANYDATA ? symTable.mapAnydataType : symTable.mapJsonType; if (isImmutable(type)) { return (BMapType) ImmutableTypeCloner.getEffectiveImmutableType(null, this, mapType, env, symTable, anonymousModelHelper, names); } return mapType; } private BArrayType getArrayTypeForAnydataOrJson(BType type) { BArrayType arrayType = type.tag == TypeTags.ANYDATA ? 
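/* Illustrative note (added; not part of the original source): for the tuple/array cases, the
   intersection is computed member-wise against the array's element type. Conceptually, a type
   such as [int, string] & anydata[] intersects to [int, string], since both int and string are
   subtypes of anydata; a closed array of a mismatched length would yield no intersection. */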
symTable.arrayAnydataType : symTable.arrayJsonType; if (isImmutable(type)) { return (BArrayType) ImmutableTypeCloner.getEffectiveImmutableType(null, this, arrayType, env, symTable, anonymousModelHelper, names); } return arrayType; } private BType createArrayAndTupleIntersection(IntersectionContext intersectionContext, BArrayType arrayType, BTupleType tupleType, SymbolEnv env, LinkedHashSet<BType> visitedTypes) { if (!visitedTypes.add(tupleType)) { return tupleType; } List<BType> tupleTypes = tupleType.tupleTypes; if (arrayType.state == BArrayState.CLOSED && tupleTypes.size() != arrayType.size) { if (tupleTypes.size() > arrayType.size) { return symTable.semanticError; } if (tupleType.restType == null) { return symTable.semanticError; } } List<BType> tupleMemberTypes = new ArrayList<>(tupleTypes.size()); BType eType = arrayType.eType; for (BType memberType : tupleTypes) { BType intersectionType = getTypeIntersection(intersectionContext, memberType, eType, env, visitedTypes); if (intersectionType == symTable.semanticError) { return symTable.semanticError; } tupleMemberTypes.add(intersectionType); } if (tupleType.restType == null) { return new BTupleType(null, tupleMemberTypes); } BType restIntersectionType = getTypeIntersection(intersectionContext, tupleType.restType, eType, env, visitedTypes); if (restIntersectionType == symTable.semanticError) { return new BTupleType(null, tupleMemberTypes); } return new BTupleType(null, tupleMemberTypes, restIntersectionType, 0); } private BType createTupleAndTupleIntersection(IntersectionContext intersectionContext, BTupleType lhsTupleType, BTupleType tupleType, SymbolEnv env, LinkedHashSet<BType> visitedTypes) { if (lhsTupleType.restType == null && tupleType.restType != null) { return symTable.semanticError; } if (lhsTupleType.restType == null && lhsTupleType.tupleTypes.size() != tupleType.tupleTypes.size()) { return symTable.semanticError; } List<BType> lhsTupleTypes = lhsTupleType.tupleTypes; List<BType> tupleTypes = tupleType.tupleTypes; if (lhsTupleTypes.size() > tupleTypes.size()) { return symTable.semanticError; } List<BType> tupleMemberTypes = new ArrayList<>(tupleTypes.size()); for (int i = 0; i < tupleTypes.size(); i++) { BType lhsType = (lhsTupleTypes.size() > i) ? 
lhsTupleTypes.get(i) : lhsTupleType.restType; BType intersectionType = getTypeIntersection(intersectionContext, tupleTypes.get(i), lhsType, env, visitedTypes); if (intersectionType == symTable.semanticError) { return symTable.semanticError; } tupleMemberTypes.add(intersectionType); } if (lhsTupleType.restType != null && tupleType.restType != null) { BType restIntersectionType = getTypeIntersection(intersectionContext, tupleType.restType, lhsTupleType.restType, env, visitedTypes); if (restIntersectionType == symTable.semanticError) { return new BTupleType(null, tupleMemberTypes); } return new BTupleType(null, tupleMemberTypes, restIntersectionType, 0); } return new BTupleType(null, tupleMemberTypes); } private BType getIntersectionForErrorTypes(IntersectionContext intersectionContext, BType lhsType, BType rhsType, SymbolEnv env, LinkedHashSet<BType> visitedTypes) { BType detailIntersectionType = getTypeIntersection(intersectionContext, ((BErrorType) lhsType).detailType, ((BErrorType) rhsType).detailType, env, visitedTypes); if (detailIntersectionType == symTable.semanticError) { return symTable.semanticError; } BErrorType intersectionErrorType = createErrorType(lhsType, rhsType, detailIntersectionType, env); if (intersectionContext.createTypeDefs) { BTypeSymbol errorTSymbol = intersectionErrorType.tsymbol; BLangErrorType bLangErrorType = TypeDefBuilderHelper.createBLangErrorType(symTable.builtinPos, intersectionErrorType, env, anonymousModelHelper); BLangTypeDefinition errorTypeDefinition = TypeDefBuilderHelper.addTypeDefinition( intersectionErrorType, errorTSymbol, bLangErrorType, env); errorTypeDefinition.pos = symTable.builtinPos; } return intersectionErrorType; } private BType createRecordIntersection(IntersectionContext intersectionContext, BRecordType recordTypeOne, BRecordType recordTypeTwo, SymbolEnv env, LinkedHashSet<BType> visitedTypes) { LinkedHashMap<String, BField> recordOneFields = recordTypeOne.fields; LinkedHashMap<String, BField> recordTwoFields = recordTypeTwo.fields; Set<String> recordOneKeys = recordOneFields.keySet(); Set<String> recordTwoKeys = recordTwoFields.keySet(); boolean isRecordOneClosed = recordTypeOne.sealed; boolean isRecordTwoClosed = recordTypeTwo.sealed; BType effectiveRecordOneRestFieldType = getConstraint(recordTypeOne); BType effectiveRecordTwoRestFieldType = getConstraint(recordTypeTwo); BRecordType newType = createAnonymousRecord(env); BTypeSymbol newTypeSymbol = newType.tsymbol; Set<String> addedKeys = new HashSet<>(); LinkedHashMap<String, BField> newTypeFields = newType.fields; if (!populateFields(intersectionContext.switchLeft(), recordTypeOne, env, recordOneFields, recordTwoFields, recordOneKeys, recordTwoKeys, isRecordTwoClosed, effectiveRecordTwoRestFieldType, newTypeSymbol, addedKeys, newTypeFields, visitedTypes)) { return symTable.semanticError; } if (!populateFields(intersectionContext.switchRight(), recordTypeTwo, env, recordTwoFields, recordOneFields, recordTwoKeys, recordOneKeys, isRecordOneClosed, effectiveRecordOneRestFieldType, newTypeSymbol, addedKeys, newTypeFields, visitedTypes)) { return symTable.semanticError; } BType restFieldType = getTypeIntersection(intersectionContext, effectiveRecordOneRestFieldType, effectiveRecordTwoRestFieldType, env, visitedTypes); if (setRestType(newType, restFieldType) == symTable.semanticError) { return symTable.semanticError; } if ((newType.sealed || newType.restFieldType == symTable.neverType) && (newTypeFields.isEmpty() || allReadOnlyFields(newTypeFields))) { newType.flags |= Flags.READONLY; 
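/* Illustrative note (added; not part of the original source): a computed record intersection is
   itself marked readonly when it is closed and every field ended up readonly. Ballerina sketch,
   assuming standard semantics:

       type Point record {| int x; int y; |};
       type FrozenPoint Point & readonly;

       FrozenPoint fp = {x: 0, y: 0};
       // fp.x = 1; // compile-time error: fp is immutable */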
newTypeSymbol.flags |= Flags.READONLY; } if (intersectionContext.createTypeDefs) { BLangRecordTypeNode recordTypeNode = TypeDefBuilderHelper.createRecordTypeNode( newType, env.enclPkg.packageID, symTable, symTable.builtinPos); BLangTypeDefinition recordTypeDef = TypeDefBuilderHelper.addTypeDefinition( newType, newType.tsymbol, recordTypeNode, env); env.enclPkg.symbol.scope.define(newType.tsymbol.name, newType.tsymbol); recordTypeDef.pos = symTable.builtinPos; } return newType; } private boolean populateFields(IntersectionContext intersectionContext, BRecordType lhsRecord, SymbolEnv env, LinkedHashMap<String, BField> lhsRecordFields, LinkedHashMap<String, BField> rhsRecordFields, Set<String> lhsRecordKeys, Set<String> rhsRecordKeys, boolean isRhsRecordClosed, BType effectiveRhsRecordRestFieldType, BTypeSymbol newTypeSymbol, Set<String> addedKeys, LinkedHashMap<String, BField> newTypeFields, LinkedHashSet<BType> visitedTypes) { for (String key : lhsRecordKeys) { BField lhsRecordField = lhsRecordFields.get(key); if (!validateRecordFieldDefaultValueForIntersection(intersectionContext, lhsRecordField, lhsRecord)) { return false; } if (!addedKeys.add(key)) { continue; } BType intersectionFieldType; long intersectionFlags = lhsRecordField.symbol.flags; BType recordOneFieldType = lhsRecordField.type; if (!rhsRecordKeys.contains(key)) { if (isRhsRecordClosed) { if (!Symbols.isFlagOn(lhsRecordField.symbol.flags, Flags.OPTIONAL)) { return false; } continue; } if (isNeverTypeOrStructureTypeWithARequiredNeverMember(effectiveRhsRecordRestFieldType) && !isNeverTypeOrStructureTypeWithARequiredNeverMember(recordOneFieldType)) { return false; } intersectionFieldType = getIntersection(intersectionContext, recordOneFieldType, env, effectiveRhsRecordRestFieldType, visitedTypes); if (intersectionFieldType == null || intersectionFieldType == symTable.semanticError) { if (Symbols.isFlagOn(lhsRecordField.symbol.flags, Flags.OPTIONAL)) { continue; } return false; } } else { BField rhsRecordField = rhsRecordFields.get(key); intersectionFieldType = getIntersection(intersectionContext, recordOneFieldType, env, rhsRecordField.type, visitedTypes); long rhsFieldFlags = rhsRecordField.symbol.flags; if (Symbols.isFlagOn(rhsFieldFlags, Flags.READONLY)) { intersectionFlags |= Flags.READONLY; } if (!Symbols.isFlagOn(rhsFieldFlags, Flags.OPTIONAL) && Symbols.isFlagOn(intersectionFlags, Flags.OPTIONAL)) { intersectionFlags &= ~Flags.OPTIONAL; } if (Symbols.isFlagOn(rhsFieldFlags, Flags.REQUIRED) && !Symbols.isFlagOn(intersectionFlags, Flags.REQUIRED)) { intersectionFlags |= Flags.REQUIRED; } } if (intersectionFieldType == null || intersectionFieldType == symTable.semanticError) { return false; } org.wso2.ballerinalang.compiler.util.Name name = lhsRecordField.name; BVarSymbol recordFieldSymbol; if (intersectionFieldType.tag == TypeTags.INVOKABLE && intersectionFieldType.tsymbol != null) { recordFieldSymbol = new BInvokableSymbol(lhsRecordField.symbol.tag, intersectionFlags, name, env.enclPkg.packageID, intersectionFieldType, newTypeSymbol, lhsRecordField.pos, SOURCE); BInvokableTypeSymbol tsymbol = (BInvokableTypeSymbol) intersectionFieldType.tsymbol; BInvokableSymbol invokableSymbol = (BInvokableSymbol) recordFieldSymbol; invokableSymbol.params = tsymbol == null ? 
null : new ArrayList<>(tsymbol.params); invokableSymbol.restParam = tsymbol.restParam; invokableSymbol.retType = tsymbol.returnType; invokableSymbol.flags = tsymbol.flags; } else { recordFieldSymbol = new BVarSymbol(intersectionFlags, name, env.enclPkg.packageID, intersectionFieldType, newTypeSymbol, lhsRecordField.pos, SOURCE); } newTypeFields.put(key, new BField(name, null, recordFieldSymbol)); newTypeSymbol.scope.define(name, recordFieldSymbol); } return true; } private boolean allReadOnlyFields(LinkedHashMap<String, BField> fields) { for (BField field : fields.values()) { if (!Symbols.isFlagOn(field.symbol.flags, Flags.READONLY)) { return false; } } return true; } private BType setRestType(BRecordType recordType, BType restType) { if (restType == symTable.semanticError) { recordType.restFieldType = symTable.semanticError; return symTable.semanticError; } if (restType == symTable.neverType) { recordType.sealed = true; recordType.restFieldType = symTable.noType; return symTable.noType; } recordType.restFieldType = restType; return restType; } private BType getConstraint(BRecordType recordType) { if (recordType.sealed) { return symTable.neverType; } return recordType.restFieldType; } private BRecordType createAnonymousRecord(SymbolEnv env) { EnumSet<Flag> flags = EnumSet.of(Flag.PUBLIC, Flag.ANONYMOUS); BRecordTypeSymbol recordSymbol = Symbols.createRecordSymbol(Flags.asMask(flags), Names.EMPTY, env.enclPkg.packageID, null, env.scope.owner, null, VIRTUAL); recordSymbol.name = names.fromString( anonymousModelHelper.getNextAnonymousTypeKey(env.enclPkg.packageID)); BInvokableType bInvokableType = new BInvokableType(new ArrayList<>(), symTable.nilType, null); BInvokableSymbol initFuncSymbol = Symbols.createFunctionSymbol( Flags.PUBLIC, Names.EMPTY, Names.EMPTY, env.enclPkg.symbol.pkgID, bInvokableType, env.scope.owner, false, symTable.builtinPos, VIRTUAL); initFuncSymbol.retType = symTable.nilType; recordSymbol.initializerFunc = new BAttachedFunction(Names.INIT_FUNCTION_SUFFIX, initFuncSymbol, bInvokableType, symTable.builtinPos); recordSymbol.scope = new Scope(recordSymbol); BRecordType recordType = new BRecordType(recordSymbol); recordType.tsymbol = recordSymbol; recordSymbol.type = recordType; return recordType; } private BRecordType getEquivalentRecordType(BMapType mapType) { BRecordType equivalentRecordType = new BRecordType(null); equivalentRecordType.sealed = false; equivalentRecordType.restFieldType = mapType.constraint; return equivalentRecordType; } private BErrorType createErrorType(BType lhsType, BType rhsType, BType detailType, SymbolEnv env) { BErrorType lhsErrorType = (BErrorType) lhsType; BErrorType rhsErrorType = (BErrorType) rhsType; BErrorType errorType = createErrorType(detailType, lhsType.flags, env); errorType.tsymbol.flags |= rhsType.flags; errorType.typeIdSet = BTypeIdSet.getIntersection(lhsErrorType.typeIdSet, rhsErrorType.typeIdSet); return errorType; } public BErrorType createErrorType(BType detailType, long flags, SymbolEnv env) { String name = anonymousModelHelper.getNextAnonymousIntersectionErrorTypeName(env.enclPkg.packageID); BErrorTypeSymbol errorTypeSymbol = Symbols.createErrorSymbol(flags | Flags.ANONYMOUS, names.fromString(name), env.enclPkg.symbol.pkgID, null, env.scope.owner, symTable.builtinPos, VIRTUAL); errorTypeSymbol.scope = new Scope(errorTypeSymbol); BErrorType errorType = new BErrorType(errorTypeSymbol, detailType); errorType.flags |= errorTypeSymbol.flags; errorTypeSymbol.type = errorType; errorType.typeIdSet = BTypeIdSet.emptySet(); return 
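/* Illustrative note (added; not part of the original source): createErrorType merges the detail
   types (and type-id sets) of the two error operands. Ballerina sketch, assuming standard error
   intersection semantics:

       type TrxError error<record {| string op; |}>;
       type IoError error<record {| int code; |}>;
       type TrxIoError TrxError & IoError; // detail type: {| string op; int code; |} */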
errorType; } private boolean populateRecordFields(IntersectionContext diagnosticContext, BRecordType newType, BType originalType, SymbolEnv env, BType constraint) { BTypeSymbol intersectionRecordSymbol = newType.tsymbol; if (originalType.getKind() != TypeKind.RECORD) { return true; } BRecordType originalRecordType = (BRecordType) originalType; LinkedHashMap<String, BField> fields = new LinkedHashMap<>(); for (BField origField : originalRecordType.fields.values()) { org.wso2.ballerinalang.compiler.util.Name origFieldName = origField.name; String nameString = origFieldName.value; if (!validateRecordFieldDefaultValueForIntersection(diagnosticContext, origField, originalRecordType)) { return false; } BType recordFieldType = validateRecordField(diagnosticContext, newType, origField, constraint, env); if (recordFieldType == symTable.semanticError) { return false; } BVarSymbol recordFieldSymbol = new BVarSymbol(origField.symbol.flags, origFieldName, env.enclPkg.packageID, recordFieldType, intersectionRecordSymbol, origField.pos, SOURCE); if (recordFieldType == symTable.neverType && Symbols.isFlagOn(recordFieldSymbol.flags, Flags.OPTIONAL)) { recordFieldSymbol.flags &= (~Flags.REQUIRED); recordFieldSymbol.flags |= Flags.OPTIONAL; } if (recordFieldType.tag == TypeTags.INVOKABLE && recordFieldType.tsymbol != null) { BInvokableTypeSymbol tsymbol = (BInvokableTypeSymbol) recordFieldType.tsymbol; BInvokableSymbol invokableSymbol = (BInvokableSymbol) recordFieldSymbol; invokableSymbol.params = tsymbol.params == null ? null : new ArrayList<>(tsymbol.params); invokableSymbol.restParam = tsymbol.restParam; invokableSymbol.retType = tsymbol.returnType; invokableSymbol.flags = tsymbol.flags; } fields.put(nameString, new BField(origFieldName, null, recordFieldSymbol)); intersectionRecordSymbol.scope.define(origFieldName, recordFieldSymbol); } newType.fields.putAll(fields); return true; } private boolean validateRecordFieldDefaultValueForIntersection(IntersectionContext diagnosticContext, BField field, BRecordType recordType) { if (field.symbol != null && field.symbol.isDefaultable && !diagnosticContext.ignoreDefaultValues) { diagnosticContext.logError(DiagnosticErrorCode.INTERSECTION_NOT_ALLOWED_WITH_TYPE, recordType, field.name); return false; } return true; } private BType validateRecordField(IntersectionContext intersectionContext, BRecordType newType, BField origField, BType constraint, SymbolEnv env) { if (hasField(newType, origField)) { return validateOverlappingFields(newType, origField); } if (constraint == null) { return origField.type; } BType fieldType = getTypeIntersection(intersectionContext, origField.type, constraint, env); if (fieldType.tag == TypeTags.NEVER && !Symbols.isOptional(origField.symbol)) { return symTable.semanticError; } if (fieldType != symTable.semanticError) { return fieldType; } if (Symbols.isOptional(origField.symbol)) { return symTable.neverType; } return symTable.semanticError; } private boolean hasField(BRecordType recordType, BField origField) { return recordType.fields.containsKey(origField.name.value); } private BType validateOverlappingFields(BRecordType newType, BField origField) { if (!hasField(newType, origField)) { return origField.type; } BField overlappingField = newType.fields.get(origField.name.value); if (isAssignable(overlappingField.type, origField.type)) { return overlappingField.type; } if (isAssignable(origField.type, overlappingField.type)) { return origField.type; } return symTable.semanticError; } private void removeErrorFromReadonlyType(List<BType> 
remainingTypes) { Iterator<BType> remainingIterator = remainingTypes.listIterator(); boolean addAnyAndReadOnly = false; while (remainingIterator.hasNext()) { BType remainingType = remainingIterator.next(); if (remainingType.tag != TypeTags.READONLY) { continue; } remainingIterator.remove(); addAnyAndReadOnly = true; } if (addAnyAndReadOnly) { remainingTypes.add(symTable.anyAndReadonly); } } private BType getRemainingType(BUnionType originalType, List<BType> removeTypes) { List<BType> remainingTypes = getAllTypes(originalType, true); boolean hasErrorToRemove = false; for (BType removeType : removeTypes) { remainingTypes.removeIf(type -> isAssignable(type, removeType)); if (!hasErrorToRemove && removeType.tag == TypeTags.ERROR) { hasErrorToRemove = true; } } if (hasErrorToRemove) { removeErrorFromReadonlyType(remainingTypes); } List<BType> finiteTypesToRemove = new ArrayList<>(); List<BType> finiteTypesToAdd = new ArrayList<>(); for (BType remainingType : remainingTypes) { if (remainingType.tag == TypeTags.FINITE) { BFiniteType finiteType = (BFiniteType) remainingType; finiteTypesToRemove.add(finiteType); BType remainingTypeWithMatchesRemoved = getRemainingType(finiteType, removeTypes); if (remainingTypeWithMatchesRemoved != symTable.semanticError) { finiteTypesToAdd.add(remainingTypeWithMatchesRemoved); } } } remainingTypes.removeAll(finiteTypesToRemove); remainingTypes.addAll(finiteTypesToAdd); if (remainingTypes.size() == 1) { return remainingTypes.get(0); } if (remainingTypes.isEmpty()) { return symTable.nullSet; } return BUnionType.create(null, new LinkedHashSet<>(remainingTypes)); } private BType getRemainingType(BFiniteType originalType, List<BType> removeTypes) { Set<BLangExpression> remainingValueSpace = new LinkedHashSet<>(); for (BLangExpression valueExpr : originalType.getValueSpace()) { boolean matchExists = false; for (BType remType : removeTypes) { if (isAssignable(valueExpr.getBType(), remType) || isAssignableToFiniteType(remType, (BLangLiteral) valueExpr)) { matchExists = true; break; } } if (!matchExists) { remainingValueSpace.add(valueExpr); } } if (remainingValueSpace.isEmpty()) { return symTable.semanticError; } BTypeSymbol finiteTypeSymbol = Symbols.createTypeSymbol(SymTag.FINITE_TYPE, originalType.tsymbol.flags, names.fromString("$anonType$" + UNDERSCORE + finiteTypeCount++), originalType.tsymbol.pkgID, null, originalType.tsymbol.owner, originalType.tsymbol.pos, VIRTUAL); BFiniteType intersectingFiniteType = new BFiniteType(finiteTypeSymbol, remainingValueSpace); finiteTypeSymbol.type = intersectingFiniteType; return intersectingFiniteType; } public BType getSafeType(BType type, boolean liftNil, boolean liftError) { if (liftNil) { switch (type.tag) { case TypeTags.JSON: return new BJSONType((BJSONType) type, false); case TypeTags.ANY: return new BAnyType(type.tag, type.tsymbol, false); case TypeTags.ANYDATA: return new BAnydataType((BAnydataType) type, false); case TypeTags.READONLY: if (liftError) { return symTable.anyAndReadonly; } return new BReadonlyType(type.tag, type.tsymbol, false); } } if (type.tag != TypeTags.UNION) { return type; } BUnionType unionType = (BUnionType) type; LinkedHashSet<BType> memTypes = new LinkedHashSet<>(unionType.getMemberTypes()); BUnionType errorLiftedType = BUnionType.create(null, memTypes); if (liftNil) { errorLiftedType.remove(symTable.nilType); } if (liftError) { LinkedHashSet<BType> bTypes = new LinkedHashSet<>(); for (BType t : errorLiftedType.getMemberTypes()) { if (t.tag != TypeTags.ERROR) { bTypes.add(t); } } memTypes = bTypes; 
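/* Illustrative note (added; not part of the original source): getSafeType "lifts" nil and/or
   error members out of a union, which is what `?:` and `check` rely on. Ballerina sketch,
   assuming standard semantics:

       function demo() returns error? {
           string? maybe = ();
           string s1 = maybe ?: "default"; // nil lifted: s1 is a plain string

           string|error res = "ok";
           string s2 = check res; // error lifted: failures propagate to the caller
       } */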
errorLiftedType = BUnionType.create(null, memTypes); } if (errorLiftedType.getMemberTypes().size() == 1) { return errorLiftedType.getMemberTypes().toArray(new BType[0])[0]; } if (errorLiftedType.getMemberTypes().size() == 0) { return symTable.semanticError; } return errorLiftedType; } public List<BType> getAllTypes(BType type, boolean getReferenced) { if (type.tag != TypeTags.UNION) { if (getReferenced && type.tag == TypeTags.TYPEREFDESC) { return getAllTypes(((BTypeReferenceType) type).referredType, true); } else { return Lists.of(type); } } List<BType> memberTypes = new LinkedList<>(); ((BUnionType) type).getMemberTypes().forEach(memberType -> memberTypes.addAll(getAllTypes(memberType, true))); return memberTypes; } public boolean isAllowedConstantType(BType type) { switch (type.tag) { case TypeTags.BOOLEAN: case TypeTags.INT: case TypeTags.BYTE: case TypeTags.FLOAT: case TypeTags.DECIMAL: case TypeTags.STRING: case TypeTags.NIL: return true; case TypeTags.MAP: return isAllowedConstantType(((BMapType) type).constraint); case TypeTags.FINITE: BLangExpression finiteValue = ((BFiniteType) type).getValueSpace().toArray(new BLangExpression[0])[0]; return isAllowedConstantType(finiteValue.getBType()); case TypeTags.TYPEREFDESC: return isAllowedConstantType(((BTypeReferenceType) type).referredType); default: return false; } } public boolean isValidLiteral(BLangLiteral literal, BType targetType) { BType literalType = literal.getBType(); if (literalType.tag == targetType.tag) { return true; } switch (targetType.tag) { case TypeTags.BYTE: return literalType.tag == TypeTags.INT && isByteLiteralValue((Long) literal.value); case TypeTags.DECIMAL: return literalType.tag == TypeTags.FLOAT || literalType.tag == TypeTags.INT; case TypeTags.FLOAT: return literalType.tag == TypeTags.INT; case TypeTags.SIGNED32_INT: return literalType.tag == TypeTags.INT && isSigned32LiteralValue((Long) literal.value); case TypeTags.SIGNED16_INT: return literalType.tag == TypeTags.INT && isSigned16LiteralValue((Long) literal.value); case TypeTags.SIGNED8_INT: return literalType.tag == TypeTags.INT && isSigned8LiteralValue((Long) literal.value); case TypeTags.UNSIGNED32_INT: return literalType.tag == TypeTags.INT && isUnsigned32LiteralValue((Long) literal.value); case TypeTags.UNSIGNED16_INT: return literalType.tag == TypeTags.INT && isUnsigned16LiteralValue((Long) literal.value); case TypeTags.UNSIGNED8_INT: return literalType.tag == TypeTags.INT && isUnsigned8LiteralValue((Long) literal.value); case TypeTags.CHAR_STRING: return literalType.tag == TypeTags.STRING && isCharLiteralValue((String) literal.value); default: return false; } } /** * Validate if the return type of the given function is a subtype of `error?`, containing `()`. * * @param function The function of which the return type should be validated * @param diagnosticCode The code to log if the return type is invalid */ public void validateErrorOrNilReturn(BLangFunction function, DiagnosticCode diagnosticCode) { BType returnType = function.returnTypeNode.getBType(); if (returnType.tag == TypeTags.NIL) { return; } if (returnType.tag == TypeTags.UNION) { Set<BType> memberTypes = getEffectiveMemberTypes(((BUnionType) returnType)); if (returnType.isNullable() && memberTypes.stream().allMatch(type -> type.tag == TypeTags.NIL || type.tag == TypeTags.ERROR)) { return; } } dlog.error(function.returnTypeNode.pos, diagnosticCode, function.returnTypeNode.getBType().toString()); } /** * Type vector of size two, to hold the source and the target types. 
* * @since 0.982.0 */ private static class TypePair { BType sourceType; BType targetType; public TypePair(BType sourceType, BType targetType) { this.sourceType = sourceType; this.targetType = targetType; } @Override public boolean equals(Object obj) { if (!(obj instanceof TypePair)) { return false; } TypePair other = (TypePair) obj; return this.sourceType.equals(other.sourceType) && this.targetType.equals(other.targetType); } @Override public int hashCode() { return Objects.hash(sourceType, targetType); } } /** * A functional interface for parameterizing the type of type checking that needs to be done on the source and * target types. * * @since 0.995.0 */ private interface TypeEqualityPredicate { boolean test(BType source, BType target, Set<TypePair> unresolvedTypes); } public boolean hasFillerValue(BType type) { switch (type.tag) { case TypeTags.INT: case TypeTags.BYTE: case TypeTags.FLOAT: case TypeTags.DECIMAL: case TypeTags.STRING: case TypeTags.BOOLEAN: case TypeTags.JSON: case TypeTags.XML: case TypeTags.NIL: case TypeTags.TABLE: case TypeTags.ANYDATA: case TypeTags.MAP: case TypeTags.ANY: case TypeTags.NEVER: return true; case TypeTags.ARRAY: return checkFillerValue((BArrayType) type); case TypeTags.FINITE: return checkFillerValue((BFiniteType) type); case TypeTags.UNION: return checkFillerValue((BUnionType) type); case TypeTags.OBJECT: return checkFillerValue((BObjectType) type); case TypeTags.RECORD: return checkFillerValue((BRecordType) type); case TypeTags.TUPLE: BTupleType tupleType = (BTupleType) type; if (tupleType.isCyclic) { return false; } return tupleType.getTupleTypes().stream().allMatch(eleType -> hasFillerValue(eleType)); case TypeTags.TYPEREFDESC: return hasFillerValue(getReferredType(type)); default: if (TypeTags.isIntegerTypeTag(type.tag)) { return true; } return false; } } private boolean checkFillerValue(BObjectType type) { if ((type.tsymbol.flags & Flags.CLASS) != Flags.CLASS) { return false; } BAttachedFunction initFunction = ((BObjectTypeSymbol) type.tsymbol).initializerFunc; if (initFunction == null) { return true; } if (initFunction.symbol.getReturnType().getKind() != TypeKind.NIL) { return false; } for (BVarSymbol bVarSymbol : initFunction.symbol.getParameters()) { if (!bVarSymbol.isDefaultable) { return false; } } return true; } /** * This handles two kinds of types. Singleton: since a singleton has exactly one value, that value should itself be a * valid filler value. Union: 1. if nil is a member, it is the filler value; 2.
otherwise, all the values should belong to the same type * and the default value for that type should be a member of the union. Precondition: the value space should have at * least one element. * * @param type BFiniteType union or finite * @return boolean whether type has a valid filler value or not */ private boolean checkFillerValue(BFiniteType type) { if (type.isNullable()) { return true; } if (type.getValueSpace().size() == 1) { return true; } Iterator iterator = type.getValueSpace().iterator(); BLangExpression firstElement = (BLangExpression) iterator.next(); boolean defaultFillValuePresent = isImplicitDefaultValue(firstElement); while (iterator.hasNext()) { BLangExpression value = (BLangExpression) iterator.next(); if (!isSameBasicType(value.getBType(), firstElement.getBType())) { return false; } if (!defaultFillValuePresent && isImplicitDefaultValue(value)) { defaultFillValuePresent = true; } } return defaultFillValuePresent; } private boolean hasImplicitDefaultValue(Set<BLangExpression> valueSpace) { for (BLangExpression expression : valueSpace) { if (isImplicitDefaultValue(expression)) { return true; } } return false; } private boolean checkFillerValue(BUnionType type) { if (type.isNullable()) { return true; } Set<BType> memberTypes = new HashSet<>(); boolean hasFillerValue = false; boolean defaultValuePresent = false; boolean finiteTypePresent = false; for (BType member : getAllTypes(type, true)) { if (member.tag == TypeTags.FINITE) { Set<BType> uniqueValues = getValueTypes(((BFiniteType) member).getValueSpace()); memberTypes.addAll(uniqueValues); if (!defaultValuePresent && hasImplicitDefaultValue(((BFiniteType) member).getValueSpace())) { defaultValuePresent = true; } finiteTypePresent = true; } else { memberTypes.add(member); } if (!hasFillerValue && hasFillerValue(member)) { hasFillerValue = true; } } if (!hasFillerValue) { return false; } Iterator<BType> iterator = memberTypes.iterator(); BType firstMember = iterator.next(); while (iterator.hasNext()) { if (!isSameBasicType(firstMember, iterator.next())) { return false; } } if (finiteTypePresent) { return defaultValuePresent; } return true; } private boolean isSameBasicType(BType source, BType target) { if (isSameType(source, target)) { return true; } if (TypeTags.isIntegerTypeTag(source.tag) && TypeTags.isIntegerTypeTag(target.tag)) { return true; } return false; } private Set<BType> getValueTypes(Set<BLangExpression> valueSpace) { Set<BType> uniqueType = new HashSet<>(); for (BLangExpression expression : valueSpace) { uniqueType.add(expression.getBType()); } return uniqueType; } private boolean isImplicitDefaultValue(BLangExpression expression) { if ((expression.getKind() == NodeKind.LITERAL) || (expression.getKind() == NodeKind.NUMERIC_LITERAL)) { BLangLiteral literalExpression = (BLangLiteral) expression; BType literalExprType = literalExpression.getBType(); Object value = literalExpression.getValue(); switch (literalExprType.getKind()) { case INT: case BYTE: return value.equals(Long.valueOf(0)); case STRING: return value == null || value.equals(""); case DECIMAL: case FLOAT: return value.equals(String.valueOf(0.0)); case BOOLEAN: return value.equals(Boolean.valueOf(false)); case NIL: return true; default: return false; } } return false; } private boolean checkFillerValue(BRecordType type) { for (BField field : type.fields.values()) { if (Symbols.isFlagOn(field.symbol.flags, Flags.OPTIONAL)) { continue; } if (Symbols.isFlagOn(field.symbol.flags, Flags.REQUIRED)) { return false; } } return true; } private boolean
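/* Illustrative note (added; not part of the original source): hasFillerValue decides whether a
   type has an implicit default used to fill in list members. Ballerina sketch, assuming standard
   filler-value semantics:

       int[3] xs = []; // filled as [0, 0, 0]: int's filler value is 0
       string[2] ss = []; // filled as ["", ""]

   A finite type such as 1|2 has no filler value (0 is not a member), so a declaration like
   `(1|2)[3] ys = [];` would be rejected. */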
checkFillerValue(BArrayType type) { if (type.size == -1) { return true; } return hasFillerValue(type.eType); } /** * Get result type of the query output. * * @param type type of query expression. * @return result type. */ public BType resolveExprType(BType type) { switch (type.tag) { case TypeTags.STREAM: return ((BStreamType) type).constraint; case TypeTags.TABLE: return ((BTableType) type).constraint; case TypeTags.ARRAY: return ((BArrayType) type).eType; case TypeTags.UNION: List<BType> exprTypes = new ArrayList<>(((BUnionType) type).getMemberTypes()); for (BType returnType : exprTypes) { switch (returnType.tag) { case TypeTags.STREAM: return ((BStreamType) returnType).constraint; case TypeTags.TABLE: return ((BTableType) returnType).constraint; case TypeTags.ARRAY: return ((BArrayType) returnType).eType; case TypeTags.STRING: case TypeTags.XML: return returnType; } } default: return type; } } private boolean isSimpleBasicType(int tag) { switch (tag) { case TypeTags.BYTE: case TypeTags.FLOAT: case TypeTags.DECIMAL: case TypeTags.BOOLEAN: case TypeTags.NIL: return true; default: return (TypeTags.isIntegerTypeTag(tag)) || (TypeTags.isStringTypeTag(tag)); } } /** * Check whether a type is an ordered type. * * @param type type. * @param hasCycle whether there is a cycle. * @return boolean whether the type is an ordered type or not. */ public boolean isOrderedType(BType type, boolean hasCycle) { switch (type.tag) { case TypeTags.UNION: BUnionType unionType = (BUnionType) type; if (hasCycle) { return true; } if (unionType.isCyclic) { hasCycle = true; } Set<BType> memberTypes = unionType.getMemberTypes(); boolean allMembersOrdered = false; BType firstTypeInUnion = getReferredType(memberTypes.iterator().next()); for (BType memType : memberTypes) { memType = getReferredType(memType); if (memType.tag == TypeTags.FINITE && firstTypeInUnion.tag == TypeTags.FINITE) { Set<BLangExpression> valSpace = ((BFiniteType) firstTypeInUnion).getValueSpace(); BType baseExprType = valSpace.iterator().next().getBType(); if (!checkValueSpaceHasSameType((BFiniteType) memType, baseExprType)) { return false; } } else if (memType.tag == TypeTags.UNION) { return isOrderedType(memType, hasCycle); } else if (memType.tag != firstTypeInUnion.tag && memType.tag != TypeTags.NIL && !isIntOrStringType(memType.tag, firstTypeInUnion.tag)) { return false; } allMembersOrdered = isOrderedType(memType, hasCycle); if (!allMembersOrdered) { break; } } return allMembersOrdered; case TypeTags.ARRAY: BType elementType = ((BArrayType) type).eType; return isOrderedType(elementType, hasCycle); case TypeTags.TUPLE: List<BType> tupleMemberTypes = ((BTupleType) type).tupleTypes; for (BType memType : tupleMemberTypes) { if (!isOrderedType(memType, hasCycle)) { return false; } } BType restType = ((BTupleType) type).restType; return restType == null || isOrderedType(restType, hasCycle); case TypeTags.FINITE: boolean isValueSpaceOrdered = false; Set<BLangExpression> valSpace = ((BFiniteType) type).getValueSpace(); BType baseExprType = valSpace.iterator().next().getBType(); for (BLangExpression expr : valSpace) { if (!checkValueSpaceHasSameType((BFiniteType) type, baseExprType)) { return false; } isValueSpaceOrdered = isOrderedType(expr.getBType(), hasCycle); if (!isValueSpaceOrdered) { break; } } return isValueSpaceOrdered; case TypeTags.TYPEREFDESC: return isOrderedType(getReferredType(type), hasCycle); case TypeTags.INTERSECTION: return isOrderedType(getEffectiveTypeForIntersection(type), hasCycle); default: return isSimpleBasicType(type.tag); } 
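/* Illustrative note (added; not part of the original source): isOrderedType gates operations
   that need an ordering, such as langlib sort and relational operators. Ballerina sketch:

       int[] nums = [3, 1, 2];
       int[] sorted = nums.sort(); // int is an ordered type

       boolean b = "apple" < "banana"; // string is ordered as well */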
} private boolean isIntOrStringType(int firstTypeTag, int secondTypeTag) { return ((TypeTags.isIntegerTypeTag(firstTypeTag) || firstTypeTag == TypeTags.BYTE) && (TypeTags.isIntegerTypeTag(secondTypeTag) || secondTypeTag == TypeTags.BYTE)) || ((TypeTags.isStringTypeTag(firstTypeTag)) && (TypeTags.isStringTypeTag(secondTypeTag))); } public boolean isUnionOfSimpleBasicTypes(BType bType) { BType type = getReferredType(bType); if (type.tag == TypeTags.UNION) { Set<BType> memberTypes = ((BUnionType) type).getMemberTypes(); for (BType memType : memberTypes) { memType = getReferredType(memType); if (!isSimpleBasicType(memType.tag)) { return false; } } return true; } return isSimpleBasicType(type.tag); } public BType findCompatibleType(BType type) { switch (type.tag) { case TypeTags.DECIMAL: case TypeTags.FLOAT: case TypeTags.XML: case TypeTags.XML_TEXT: return type; case TypeTags.INT: case TypeTags.BYTE: case TypeTags.SIGNED32_INT: case TypeTags.SIGNED16_INT: case TypeTags.SIGNED8_INT: case TypeTags.UNSIGNED32_INT: case TypeTags.UNSIGNED16_INT: case TypeTags.UNSIGNED8_INT: return symTable.intType; case TypeTags.STRING: case TypeTags.CHAR_STRING: return symTable.stringType; case TypeTags.UNION: LinkedHashSet<BType> memberTypes = ((BUnionType) type).getMemberTypes(); return findCompatibleType(memberTypes.iterator().next()); case TypeTags.TYPEREFDESC: return findCompatibleType(((BTypeReferenceType) type).referredType); default: Set<BLangExpression> valueSpace = ((BFiniteType) type).getValueSpace(); return findCompatibleType(valueSpace.iterator().next().getBType()); } } public boolean isNonNilSimpleBasicTypeOrString(BType bType) { BType type = getReferredType(bType); if (type.tag == TypeTags.UNION) { Set<BType> memberTypes = ((BUnionType) type).getMemberTypes(); for (BType member : memberTypes) { BType memType = getReferredType(member); if (memType.tag == TypeTags.NIL || !isSimpleBasicType(memType.tag)) { return false; } } return true; } return type.tag != TypeTags.NIL && isSimpleBasicType(type.tag); } public boolean isSubTypeOfReadOnlyOrIsolatedObjectUnion(BType bType) { BType type = getReferredType(bType); if (isInherentlyImmutableType(type) || Symbols.isFlagOn(type.flags, Flags.READONLY)) { return true; } int tag = type.tag; if (tag == TypeTags.OBJECT) { return isIsolated(type); } if (tag != TypeTags.UNION) { return false; } for (BType memberType : ((BUnionType) type).getMemberTypes()) { if (!isSubTypeOfReadOnlyOrIsolatedObjectUnion(memberType)) { return false; } } return true; } private boolean isIsolated(BType type) { return Symbols.isFlagOn(type.flags, Flags.ISOLATED); } private boolean isImmutable(BType type) { return Symbols.isFlagOn(type.flags, Flags.READONLY); } BType getTypeWithoutNil(BType type) { BType constraint = getReferredType(type); if (constraint.tag != TypeTags.UNION) { return constraint; } BUnionType unionType = (BUnionType) constraint; if (!unionType.isNullable()) { return unionType; } List<BType> nonNilTypes = new ArrayList<>(); for (BType memberType : unionType.getMemberTypes()) { if (!isAssignable(memberType, symTable.nilType)) { nonNilTypes.add(memberType); } } if (nonNilTypes.size() == 1) { return nonNilTypes.get(0); } return BUnionType.create(null, new LinkedHashSet<>(nonNilTypes)); } public boolean isNeverTypeOrStructureTypeWithARequiredNeverMember(BType type) { if (type == null) { return false; } Set<BType> visitedTypeSet = new HashSet<>(); visitedTypeSet.add(type); return isNeverTypeOrStructureTypeWithARequiredNeverMember(type, visitedTypeSet); } boolean 
isNeverTypeOrStructureTypeWithARequiredNeverMember(BType type, Set<BType> visitedTypeSet) { switch (type.tag) { case TypeTags.NEVER: return true; case TypeTags.RECORD: for (BField field : ((BRecordType) type).fields.values()) { if ((SymbolFlags.isFlagOn(field.symbol.flags, SymbolFlags.REQUIRED) || !SymbolFlags.isFlagOn(field.symbol.flags, SymbolFlags.OPTIONAL)) && !visitedTypeSet.contains(field.type) && isNeverTypeOrStructureTypeWithARequiredNeverMember(field.type, visitedTypeSet)) { return true; } } return false; case TypeTags.TUPLE: BTupleType tupleType = (BTupleType) type; List<BType> tupleTypes = tupleType.tupleTypes; for (BType mem : tupleTypes) { if (!visitedTypeSet.add(mem)) { continue; } if (isNeverTypeOrStructureTypeWithARequiredNeverMember(mem, visitedTypeSet)) { return true; } } return false; case TypeTags.ARRAY: BArrayType arrayType = (BArrayType) type; visitedTypeSet.add(arrayType.eType); return arrayType.state != BArrayState.OPEN && isNeverTypeOrStructureTypeWithARequiredNeverMember(arrayType.eType, visitedTypeSet); case TypeTags.TYPEREFDESC: visitedTypeSet.add(type); return isNeverTypeOrStructureTypeWithARequiredNeverMember(getReferredType(type), visitedTypeSet); default: return false; } } private static class ListenerValidationModel { private final Types types; private final SymbolTable symtable; private final BType serviceNameType; boolean attachFound; boolean detachFound; boolean startFound; boolean gracefulStopFound; boolean immediateStopFound; public ListenerValidationModel(Types types, SymbolTable symTable) { this.types = types; this.symtable = symTable; this.serviceNameType = BUnionType.create(null, symtable.stringType, symtable.arrayStringType, symtable.nilType); } boolean isValidListener() { return attachFound && detachFound && startFound && gracefulStopFound && immediateStopFound; } private boolean checkMethods(List<BAttachedFunction> rhsFuncs) { for (BAttachedFunction func : rhsFuncs) { switch (func.funcName.value) { case "attach": if (!checkAttachMethod(func)) { return false; } break; case "detach": if (!checkDetachMethod(func)) { return false; } break; case "start": if (!checkStartMethod(func)) { return true; } break; case "gracefulStop": if (!checkGracefulStop(func)) { return false; } break; case "immediateStop": if (!checkImmediateStop(func)) { return false; } break; } } return isValidListener(); } private boolean emptyParamList(BAttachedFunction func) { return func.type.paramTypes.isEmpty() && func.type.restType != symtable.noType; } private boolean publicAndReturnsErrorOrNil(BAttachedFunction func) { if (!Symbols.isPublic(func.symbol)) { return false; } return types.isAssignable(func.type.retType, symtable.errorOrNilType); } private boolean isPublicNoParamReturnsErrorOrNil(BAttachedFunction func) { if (!publicAndReturnsErrorOrNil(func)) { return false; } return emptyParamList(func); } private boolean checkImmediateStop(BAttachedFunction func) { return immediateStopFound = isPublicNoParamReturnsErrorOrNil(func); } private boolean checkGracefulStop(BAttachedFunction func) { return gracefulStopFound = isPublicNoParamReturnsErrorOrNil(func); } private boolean checkStartMethod(BAttachedFunction func) { return startFound = publicAndReturnsErrorOrNil(func); } private boolean checkDetachMethod(BAttachedFunction func) { if (!publicAndReturnsErrorOrNil(func)) { return false; } if (func.type.paramTypes.size() != 1) { return false; } return detachFound = isServiceObject(func.type.paramTypes.get(0)); } private boolean checkAttachMethod(BAttachedFunction func) { if 
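/* Illustrative note (added; not part of the original source): ListenerValidationModel checks the
   listener contract enforced below. A minimal Ballerina listener sketch, assuming the standard
   contract (attach/detach/'start/gracefulStop/immediateStop, all public, returning error?):

       public class MyListener {
           public function attach(service object {} s, string[]|string? name = ()) returns error? {}
           public function detach(service object {} s) returns error? {}
           public function 'start() returns error? {}
           public function gracefulStop() returns error? {}
           public function immediateStop() returns error? {}
       } */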
(!publicAndReturnsErrorOrNil(func)) { return false; } if (func.type.paramTypes.size() != 2) { return false; } BType firstParamType = func.type.paramTypes.get(0); if (!isServiceObject(firstParamType)) { return false; } BType secondParamType = func.type.paramTypes.get(1); boolean sameType = types.isAssignable(secondParamType, this.serviceNameType); return attachFound = sameType; } private boolean isServiceObject(BType bType) { BType type = types.getReferredType(bType); if (type.tag == TypeTags.UNION) { for (BType memberType : ((BUnionType) type).getMemberTypes()) { if (!isServiceObject(memberType)) { return false; } } return true; } if (type.tag != TypeTags.OBJECT) { return false; } return Symbols.isService(type.tsymbol); } } /** * Intersection type validation helper. * * @since 2.0.0 */ public static class IntersectionContext { Location lhsPos; Location rhsPos; BLangDiagnosticLog dlog; ContextOption contextOption; boolean ignoreDefaultValues; boolean createTypeDefs; boolean preferNonGenerativeIntersection; private IntersectionContext(BLangDiagnosticLog diaglog, Location left, Location right) { this.dlog = diaglog; this.lhsPos = left; this.rhsPos = right; this.contextOption = ContextOption.NON; this.ignoreDefaultValues = false; this.createTypeDefs = true; this.preferNonGenerativeIntersection = false; } /** * Create {@link IntersectionContext} used for calculating the intersection type when the user * explicitly writes an intersection type. This will produce error messages explaining why there is no intersection * between two types. * * @return a {@link IntersectionContext} */ public static IntersectionContext from(BLangDiagnosticLog diaglog, Location left, Location right) { return new IntersectionContext(diaglog, left, right); } /** * Create {@link IntersectionContext} used for calculating the intersection type to see if there * is an intersection between the types. This does not emit error messages explaining why there is no intersection * between two types. This also does not generate a type definition for the calculated intersection type. * Do not use this context to create an intersection type that uses the calculated type for any purpose other * than seeing if there is an intersection. * * @return a {@link IntersectionContext} */ public static IntersectionContext compilerInternalIntersectionTestContext() { IntersectionContext intersectionContext = new IntersectionContext(null, null, null); intersectionContext.ignoreDefaultValues = true; intersectionContext.createTypeDefs = false; return intersectionContext; } /** * Create {@link IntersectionContext} used for calculating the intersection type. * This does not emit error messages explaining why there is no intersection between two types. * * @return a {@link IntersectionContext} */ public static IntersectionContext compilerInternalIntersectionContext() { IntersectionContext diagnosticContext = new IntersectionContext(null, null, null); return diagnosticContext; } /** * Create {@link IntersectionContext} used for checking the existence of a valid intersection, irrespective * of default values. * Type definitions are not created. * This does not emit error messages explaining why there is no intersection between two types.
* * @return a {@link IntersectionContext} */ public static IntersectionContext typeTestIntersectionExistenceContext() { IntersectionContext intersectionContext = new IntersectionContext(null, null, null); intersectionContext.ignoreDefaultValues = true; intersectionContext.preferNonGenerativeIntersection = true; intersectionContext.createTypeDefs = false; return intersectionContext; } /** * Create {@link IntersectionContext} used for creating effective types for the intersection of types, * irrespective of default values. * Type definitions are created. * This does not emit error messages explaining why there is no intersection between two types. * * @return a {@link IntersectionContext} */ public static IntersectionContext typeTestIntersectionCalculationContext() { IntersectionContext intersectionContext = new IntersectionContext(null, null, null); intersectionContext.ignoreDefaultValues = true; intersectionContext.preferNonGenerativeIntersection = true; intersectionContext.createTypeDefs = true; return intersectionContext; } public IntersectionContext switchLeft() { this.contextOption = ContextOption.LEFT; return this; } public IntersectionContext switchRight() { this.contextOption = ContextOption.RIGHT; return this; } private boolean logError(DiagnosticErrorCode diagnosticCode, Object... args) { Location pos = null; if (contextOption == ContextOption.LEFT && lhsPos != null) { pos = lhsPos; } else if (contextOption == ContextOption.RIGHT && rhsPos != null) { pos = rhsPos; } if (pos != null) { dlog.error(pos, diagnosticCode, args); return true; } return false; } } private enum ContextOption { LEFT, RIGHT, NON; } }
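/* Illustrative note (added; not part of the original source): per
   validateRecordFieldDefaultValueForIntersection above, a record field with a default value
   blocks an explicitly written record intersection unless the context ignores defaults (as the
   compiler-internal contexts do). Hypothetical Ballerina sketch:

       type A record {| int port = 8080; |};
       type B record {| int port; string host?; |};
       // type C A & B; // rejected: field `port` of A has a default value */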
class Types { private static final CompilerContext.Key<Types> TYPES_KEY = new CompilerContext.Key<>(); private final Unifier unifier; private SymbolTable symTable; private SymbolResolver symResolver; private BLangDiagnosticLog dlog; private Names names; private int finiteTypeCount = 0; private BUnionType expandedXMLBuiltinSubtypes; private final BLangAnonymousModelHelper anonymousModelHelper; private int recordCount = 0; private SymbolEnv env; public static Types getInstance(CompilerContext context) { Types types = context.get(TYPES_KEY); if (types == null) { types = new Types(context); } return types; } public Types(CompilerContext context) { context.put(TYPES_KEY, this); this.symTable = SymbolTable.getInstance(context); this.symResolver = SymbolResolver.getInstance(context); this.dlog = BLangDiagnosticLog.getInstance(context); this.names = Names.getInstance(context); this.expandedXMLBuiltinSubtypes = BUnionType.create(null, symTable.xmlElementType, symTable.xmlCommentType, symTable.xmlPIType, symTable.xmlTextType); this.unifier = new Unifier(); this.anonymousModelHelper = BLangAnonymousModelHelper.getInstance(context); } public List<BType> checkTypes(BLangExpression node, List<BType> actualTypes, List<BType> expTypes) { List<BType> resTypes = new ArrayList<>(); for (int i = 0; i < actualTypes.size(); i++) { resTypes.add(checkType(node, actualTypes.get(i), expTypes.size() > i ? expTypes.get(i) : symTable.noType)); } return resTypes; } public BType checkType(BLangExpression node, BType actualType, BType expType) { return checkType(node, actualType, expType, DiagnosticErrorCode.INCOMPATIBLE_TYPES); } public BType checkType(BLangExpression expr, BType actualType, BType expType, DiagnosticCode diagCode) { expr.setDeterminedType(actualType); expr.setTypeCheckedType(checkType(expr.pos, actualType, expType, diagCode)); if (expr.getBType().tag == TypeTags.SEMANTIC_ERROR) { return expr.getBType(); } setImplicitCastExpr(expr, actualType, expType); return expr.getBType(); } public BType checkType(Location pos, BType actualType, BType expType, DiagnosticCode diagCode) { if (expType.tag == TypeTags.SEMANTIC_ERROR) { return expType; } else if (expType.tag == TypeTags.NONE) { return actualType; } else if (actualType.tag == TypeTags.SEMANTIC_ERROR) { return actualType; } else if (isAssignable(actualType, expType)) { return actualType; } dlog.error(pos, diagCode, expType, actualType); return symTable.semanticError; } public boolean isLax(BType type) { Set<BType> visited = new HashSet<>(); int result = isLaxType(type, visited); if (result == 1) { return true; } return false; } public int isLaxType(BType type, Set<BType> visited) { if (!visited.add(type)) { return -1; } switch (type.tag) { case TypeTags.JSON: case TypeTags.XML: case TypeTags.XML_ELEMENT: return 1; case TypeTags.MAP: return isLaxType(((BMapType) type).constraint, visited); case TypeTags.UNION: if (isSameType(type, symTable.jsonType)) { visited.add(type); return 1; } boolean atleastOneLaxType = false; for (BType member : ((BUnionType) type).getMemberTypes()) { int result = isLaxType(member, visited); if (result == -1) { continue; } if (result == 0) { return 0; } atleastOneLaxType = true; } return atleastOneLaxType ? 
1 : 0; case TypeTags.TYPEREFDESC: return isLaxType(getReferredType(type), visited); } return 0; } public boolean isLaxType(BType type, Map<BType, Boolean> visited) { if (visited.containsKey(type)) { return visited.get(type); } switch (type.tag) { case TypeTags.JSON: case TypeTags.XML: case TypeTags.XML_ELEMENT: visited.put(type, true); return true; case TypeTags.MAP: boolean result = isLaxType(((BMapType) type).constraint, visited); visited.put(type, result); return result; case TypeTags.UNION: if (type == symTable.jsonType || isSameType(type, symTable.jsonType)) { visited.put(type, true); return true; } for (BType member : ((BUnionType) type).getMemberTypes()) { if (!isLaxType(member, visited)) { visited.put(type, false); return false; } } visited.put(type, true); return true; case TypeTags.TYPEREFDESC: return isLaxType(getReferredType(type), visited); } visited.put(type, false); return false; } public boolean isSameType(BType source, BType target) { return isSameType(source, target, new HashSet<>()); } public boolean isSameOrderedType(BType source, BType target) { return isSameOrderedType(source, target, new HashSet<>()); } private boolean isSameOrderedType(BType source, BType target, Set<TypePair> unresolvedTypes) { if (!unresolvedTypes.add(new TypePair(source, target))) { return true; } BTypeVisitor<BType, Boolean> orderedTypeVisitor = new BOrderedTypeVisitor(unresolvedTypes); return target.accept(orderedTypeVisitor, source); } public boolean isPureType(BType type) { IsPureTypeUniqueVisitor visitor = new IsPureTypeUniqueVisitor(); return visitor.visit(type); } public boolean isAnydata(BType type) { IsAnydataUniqueVisitor visitor = new IsAnydataUniqueVisitor(); return visitor.visit(type); } private boolean isSameType(BType source, BType target, Set<TypePair> unresolvedTypes) { TypePair pair = new TypePair(source, target); if (unresolvedTypes.contains(pair)) { return true; } unresolvedTypes.add(pair); BTypeVisitor<BType, Boolean> sameTypeVisitor = new BSameTypeVisitor(unresolvedTypes); return target.accept(sameTypeVisitor, source); } public boolean isValueType(BType type) { switch (type.tag) { case TypeTags.BOOLEAN: case TypeTags.BYTE: case TypeTags.DECIMAL: case TypeTags.FLOAT: case TypeTags.INT: case TypeTags.STRING: case TypeTags.SIGNED32_INT: case TypeTags.SIGNED16_INT: case TypeTags.SIGNED8_INT: case TypeTags.UNSIGNED32_INT: case TypeTags.UNSIGNED16_INT: case TypeTags.UNSIGNED8_INT: case TypeTags.CHAR_STRING: return true; case TypeTags.TYPEREFDESC: return isValueType(getReferredType(type)); default: return false; } } boolean isBasicNumericType(BType bType) { BType type = getReferredType(bType); return type.tag < TypeTags.STRING || TypeTags.isIntegerTypeTag(type.tag); } boolean finiteTypeContainsNumericTypeValues(BFiniteType finiteType) { return finiteType.getValueSpace().stream().anyMatch(valueExpr -> isBasicNumericType(valueExpr.getBType())); } public boolean containsErrorType(BType bType) { BType type = getReferredType(bType); if (type.tag == TypeTags.UNION) { return ((BUnionType) type).getMemberTypes().stream() .anyMatch(this::containsErrorType); } if (type.tag == TypeTags.READONLY) { return true; } return type.tag == TypeTags.ERROR; } public boolean isSubTypeOfList(BType bType) { BType type = getReferredType(bType); if (type.tag != TypeTags.UNION) { return isSubTypeOfBaseType(type, TypeTags.ARRAY) || isSubTypeOfBaseType(type, TypeTags.TUPLE); } return ((BUnionType) type).getMemberTypes().stream().allMatch(this::isSubTypeOfList); } BType 
resolvePatternTypeFromMatchExpr(BLangErrorBindingPattern errorBindingPattern, BLangExpression matchExpr, SymbolEnv env) { if (matchExpr == null) { return errorBindingPattern.getBType(); } BType intersectionType = getTypeIntersection( IntersectionContext.compilerInternalIntersectionContext(), matchExpr.getBType(), errorBindingPattern.getBType(), env); if (intersectionType == symTable.semanticError) { return symTable.noType; } return intersectionType; } public BType resolvePatternTypeFromMatchExpr(BLangListBindingPattern listBindingPattern, BLangVarBindingPatternMatchPattern varBindingPatternMatchPattern, SymbolEnv env) { BTupleType listBindingPatternType = (BTupleType) listBindingPattern.getBType(); if (varBindingPatternMatchPattern.matchExpr == null) { return listBindingPatternType; } BType matchExprType = varBindingPatternMatchPattern.matchExpr.getBType(); BType intersectionType = getTypeIntersection( IntersectionContext.compilerInternalIntersectionContext(), matchExprType, listBindingPatternType, env); if (intersectionType != symTable.semanticError) { return intersectionType; } return symTable.noType; } public BType resolvePatternTypeFromMatchExpr(BLangListMatchPattern listMatchPattern, BTupleType listMatchPatternType, SymbolEnv env) { if (listMatchPattern.matchExpr == null) { return listMatchPatternType; } BType matchExprType = listMatchPattern.matchExpr.getBType(); BType intersectionType = getTypeIntersection( IntersectionContext.compilerInternalIntersectionContext(), matchExprType, listMatchPatternType, env); if (intersectionType != symTable.semanticError) { return intersectionType; } return symTable.noType; } BType resolvePatternTypeFromMatchExpr(BLangErrorMatchPattern errorMatchPattern, BLangExpression matchExpr) { if (matchExpr == null) { return errorMatchPattern.getBType(); } BType matchExprType = matchExpr.getBType(); BType patternType = errorMatchPattern.getBType(); if (isAssignable(matchExprType, patternType)) { return matchExprType; } if (isAssignable(patternType, matchExprType)) { return patternType; } return symTable.noType; } BType resolvePatternTypeFromMatchExpr(BLangConstPattern constPattern, BLangExpression constPatternExpr) { if (constPattern.matchExpr == null) { if (constPatternExpr.getKind() == NodeKind.SIMPLE_VARIABLE_REF) { return ((BLangSimpleVarRef) constPatternExpr).symbol.type; } else { return constPatternExpr.getBType(); } } BType matchExprType = constPattern.matchExpr.getBType(); BType constMatchPatternExprType = constPatternExpr.getBType(); if (constPatternExpr.getKind() == NodeKind.SIMPLE_VARIABLE_REF) { BLangSimpleVarRef constVarRef = (BLangSimpleVarRef) constPatternExpr; BType constVarRefSymbolType = constVarRef.symbol.type; if (isAssignable(constVarRefSymbolType, matchExprType)) { return constVarRefSymbolType; } return symTable.noType; } BLangLiteral constPatternLiteral = (BLangLiteral) constPatternExpr; if (containsAnyType(constMatchPatternExprType)) { return matchExprType; } else if (containsAnyType(matchExprType)) { return constMatchPatternExprType; } BType matchExprReferredType = getReferredType(matchExprType); BType constExprReferredType = getReferredType(constMatchPatternExprType); if (matchExprReferredType.tag == TypeTags.BYTE && constExprReferredType.tag == TypeTags.INT) { return matchExprType; } if (isAssignable(constMatchPatternExprType, matchExprType)) { return constMatchPatternExprType; } if (matchExprReferredType.tag == TypeTags.UNION) { for (BType memberType : ((BUnionType) matchExprReferredType).getMemberTypes()) { if 
(getReferredType(memberType).tag == TypeTags.FINITE) { if (isAssignableToFiniteType(memberType, constPatternLiteral)) { return memberType; } } else { if (isAssignable(constMatchPatternExprType, matchExprType)) { return constMatchPatternExprType; } } } } else if (matchExprReferredType.tag == TypeTags.FINITE) { if (isAssignableToFiniteType(matchExprType, constPatternLiteral)) { return matchExprType; } } return symTable.noType; } BType resolvePatternTypeFromMatchExpr(BLangMappingMatchPattern mappingMatchPattern, BType patternType, SymbolEnv env) { if (mappingMatchPattern.matchExpr == null) { return patternType; } BType intersectionType = getTypeIntersection( IntersectionContext.compilerInternalIntersectionContext(), mappingMatchPattern.matchExpr.getBType(), patternType, env); if (intersectionType == symTable.semanticError) { return symTable.noType; } return intersectionType; } public BType resolvePatternTypeFromMatchExpr(BLangMappingBindingPattern mappingBindingPattern, BLangVarBindingPatternMatchPattern varBindingPatternMatchPattern, SymbolEnv env) { BRecordType mappingBindingPatternType = (BRecordType) mappingBindingPattern.getBType(); if (varBindingPatternMatchPattern.matchExpr == null) { return mappingBindingPatternType; } BType intersectionType = getTypeIntersection( IntersectionContext.compilerInternalIntersectionContext(), varBindingPatternMatchPattern.matchExpr.getBType(), mappingBindingPatternType, env); if (intersectionType == symTable.semanticError) { return symTable.noType; } return intersectionType; } private boolean containsAnyType(BType type) { if (type.tag != TypeTags.UNION) { return type.tag == TypeTags.ANY; } for (BType memberTypes : ((BUnionType) type).getMemberTypes()) { if (memberTypes.tag == TypeTags.ANY) { return true; } } return false; } private boolean containsAnyDataType(BType type) { if (type.tag != TypeTags.UNION) { return type.tag == TypeTags.ANYDATA; } for (BType memberTypes : ((BUnionType) type).getMemberTypes()) { if (memberTypes.tag == TypeTags.ANYDATA) { return true; } } return false; } BType mergeTypes(BType typeFirst, BType typeSecond) { if (containsAnyType(typeFirst) && !containsErrorType(typeSecond)) { return typeSecond; } if (containsAnyType(typeSecond) && !containsErrorType(typeFirst)) { return typeFirst; } if (containsAnyDataType(typeFirst) && !containsErrorType(typeSecond)) { return typeSecond; } if (containsAnyDataType(typeSecond) && !containsErrorType(typeFirst)) { return typeFirst; } if (isSameBasicType(typeFirst, typeSecond)) { return typeFirst; } return BUnionType.create(null, typeFirst, typeSecond); } public boolean isSubTypeOfMapping(BType bType) { BType type = getReferredType(bType); if (type.tag == TypeTags.INTERSECTION) { return isSubTypeOfMapping(((BIntersectionType) type).effectiveType); } if (type.tag != TypeTags.UNION) { return isSubTypeOfBaseType(type, TypeTags.MAP) || isSubTypeOfBaseType(type, TypeTags.RECORD); } return ((BUnionType) type).getMemberTypes().stream().allMatch(this::isSubTypeOfMapping); } public boolean isSubTypeOfBaseType(BType bType, int baseTypeTag) { BType type = getReferredType(bType); if (type.tag == TypeTags.INTERSECTION) { type = ((BIntersectionType) type).effectiveType; } if (type.tag != TypeTags.UNION) { if ((TypeTags.isIntegerTypeTag(type.tag) || type.tag == TypeTags.BYTE) && TypeTags.INT == baseTypeTag) { return true; } if (TypeTags.isStringTypeTag(type.tag) && TypeTags.STRING == baseTypeTag) { return true; } return type.tag == baseTypeTag || (baseTypeTag == TypeTags.TUPLE && type.tag == TypeTags.ARRAY) || 
(baseTypeTag == TypeTags.ARRAY && type.tag == TypeTags.TUPLE); } if (TypeTags.isXMLTypeTag(baseTypeTag)) { return true; } return isUnionMemberTypesSubTypeOfBaseType(((BUnionType) type).getMemberTypes(), baseTypeTag); } private boolean isUnionMemberTypesSubTypeOfBaseType(LinkedHashSet<BType> memberTypes, int baseTypeTag) { for (BType type : memberTypes) { if (!isSubTypeOfBaseType(type, baseTypeTag)) { return false; } } return true; } /** * Checks whether source type is assignable to the target type. * <p> * Source type is assignable to the target type if, * 1) the target type is any and the source type is not a value type. * 2) there exists an implicit cast symbol from source to target. * 3) both types are JSON and the target constraint is no type. * 4) both types are array type and both array types are assignable. * 5) both types are MAP and the target constraint is any type or constraints are structurally equivalent. * * @param source type. * @param target type. * @return true if source type is assignable to the target type. */ public boolean isAssignable(BType source, BType target) { return isAssignable(source, target, new HashSet<>()); } private boolean isAssignable(BType source, BType target, Set<TypePair> unresolvedTypes) { if (isSameType(source, target)) { return true; } int sourceTag = source.tag; int targetTag = target.tag; if (sourceTag == TypeTags.TYPEREFDESC || targetTag == TypeTags.TYPEREFDESC) { return isAssignable(getReferredType(source), getReferredType(target), unresolvedTypes); } if (isNeverTypeOrStructureTypeWithARequiredNeverMember(source)) { return true; } if (!Symbols.isFlagOn(source.flags, Flags.PARAMETERIZED) && !isInherentlyImmutableType(target) && Symbols.isFlagOn(target.flags, Flags.READONLY) && !isInherentlyImmutableType(source) && isMutable(source)) { return false; } if (sourceTag == TypeTags.INTERSECTION) { return isAssignable(((BIntersectionType) source).effectiveType, targetTag != TypeTags.INTERSECTION ? 
target : ((BIntersectionType) target).effectiveType, unresolvedTypes); } if (targetTag == TypeTags.INTERSECTION) { return isAssignable(source, ((BIntersectionType) target).effectiveType, unresolvedTypes); } if (sourceTag == TypeTags.PARAMETERIZED_TYPE) { return isParameterizedTypeAssignable(source, target, unresolvedTypes); } if (sourceTag == TypeTags.BYTE && targetTag == TypeTags.INT) { return true; } if (TypeTags.isXMLTypeTag(sourceTag) && TypeTags.isXMLTypeTag(targetTag)) { return isXMLTypeAssignable(source, target, unresolvedTypes); } if (sourceTag == TypeTags.CHAR_STRING && targetTag == TypeTags.STRING) { return true; } if (sourceTag == TypeTags.ERROR && targetTag == TypeTags.ERROR) { return isErrorTypeAssignable((BErrorType) source, (BErrorType) target, unresolvedTypes); } else if (sourceTag == TypeTags.ERROR && targetTag == TypeTags.ANY) { return false; } if (sourceTag == TypeTags.NIL && (isNullable(target) || targetTag == TypeTags.JSON)) { return true; } if (targetTag == TypeTags.ANY && !containsErrorType(source) && !isValueType(source)) { return true; } if (targetTag == TypeTags.ANYDATA && !containsErrorType(source) && isAnydata(source)) { return true; } if (targetTag == TypeTags.READONLY) { if ((isInherentlyImmutableType(source) || Symbols.isFlagOn(source.flags, Flags.READONLY))) { return true; } if (isAssignable(source, symTable.anyAndReadonlyOrError, unresolvedTypes)) { return true; } } if (sourceTag == TypeTags.READONLY && isAssignable(symTable.anyAndReadonlyOrError, target, unresolvedTypes)) { return true; } if (targetTag == TypeTags.MAP && sourceTag == TypeTags.RECORD) { BRecordType recordType = (BRecordType) source; return isAssignableRecordType(recordType, target, unresolvedTypes); } if (targetTag == TypeTags.RECORD && sourceTag == TypeTags.MAP) { return isAssignableMapType((BMapType) source, (BRecordType) target); } if (targetTag == TypeTags.TYPEDESC && sourceTag == TypeTags.TYPEDESC) { return isAssignable(((BTypedescType) source).constraint, (((BTypedescType) target).constraint), unresolvedTypes); } if (targetTag == TypeTags.TABLE && sourceTag == TypeTags.TABLE) { return isAssignableTableType((BTableType) source, (BTableType) target, unresolvedTypes); } if (targetTag == TypeTags.STREAM && sourceTag == TypeTags.STREAM) { return isAssignableStreamType((BStreamType) source, (BStreamType) target, unresolvedTypes); } if (isBuiltInTypeWidenPossible(source, target) == TypeTestResult.TRUE) { return true; } if (sourceTag == TypeTags.FINITE) { return isFiniteTypeAssignable((BFiniteType) source, target, unresolvedTypes); } if ((targetTag == TypeTags.UNION || sourceTag == TypeTags.UNION) && isAssignableToUnionType(source, target, unresolvedTypes)) { return true; } if (targetTag == TypeTags.JSON) { if (sourceTag == TypeTags.JSON) { return true; } if (sourceTag == TypeTags.TUPLE) { return isTupleTypeAssignable(source, target, unresolvedTypes); } if (sourceTag == TypeTags.ARRAY) { return isArrayTypesAssignable((BArrayType) source, target, unresolvedTypes); } if (sourceTag == TypeTags.MAP) { return isAssignable(((BMapType) source).constraint, target, unresolvedTypes); } if (sourceTag == TypeTags.RECORD) { return isAssignableRecordType((BRecordType) source, target, unresolvedTypes); } } if (targetTag == TypeTags.FUTURE && sourceTag == TypeTags.FUTURE) { if (((BFutureType) target).constraint.tag == TypeTags.NONE) { return true; } return isAssignable(((BFutureType) source).constraint, ((BFutureType) target).constraint, unresolvedTypes); } if (targetTag == TypeTags.MAP && sourceTag == 
TypeTags.MAP) { if (((BMapType) target).constraint.tag == TypeTags.ANY && ((BMapType) source).constraint.tag != TypeTags.UNION) { return true; } return isAssignable(((BMapType) source).constraint, ((BMapType) target).constraint, unresolvedTypes); } if ((sourceTag == TypeTags.OBJECT || sourceTag == TypeTags.RECORD) && (targetTag == TypeTags.OBJECT || targetTag == TypeTags.RECORD)) { return checkStructEquivalency(source, target, unresolvedTypes); } if (sourceTag == TypeTags.TUPLE && targetTag == TypeTags.ARRAY) { return isTupleTypeAssignableToArrayType((BTupleType) source, (BArrayType) target, unresolvedTypes); } if (sourceTag == TypeTags.ARRAY && targetTag == TypeTags.TUPLE) { return isArrayTypeAssignableToTupleType((BArrayType) source, (BTupleType) target, unresolvedTypes); } if (sourceTag == TypeTags.TUPLE || targetTag == TypeTags.TUPLE) { return isTupleTypeAssignable(source, target, unresolvedTypes); } if (sourceTag == TypeTags.INVOKABLE && targetTag == TypeTags.INVOKABLE) { return isFunctionTypeAssignable((BInvokableType) source, (BInvokableType) target, new HashSet<>()); } return sourceTag == TypeTags.ARRAY && targetTag == TypeTags.ARRAY && isArrayTypesAssignable((BArrayType) source, target, unresolvedTypes); } private boolean isMutable(BType type) { if (Symbols.isFlagOn(type.flags, Flags.READONLY)) { return false; } if (type.tag != TypeTags.UNION) { return true; } BUnionType unionType = (BUnionType) type; for (BType memberType : unionType.getMemberTypes()) { if (!Symbols.isFlagOn(memberType.flags, Flags.READONLY)) { return true; } } unionType.flags |= Flags.READONLY; BTypeSymbol tsymbol = unionType.tsymbol; if (tsymbol != null) { tsymbol.flags |= Flags.READONLY; } return false; } private boolean isParameterizedTypeAssignable(BType source, BType target, Set<TypePair> unresolvedTypes) { BType resolvedSourceType = unifier.build(source); if (target.tag != TypeTags.PARAMETERIZED_TYPE) { return isAssignable(resolvedSourceType, target, unresolvedTypes); } if (((BParameterizedType) source).paramIndex != ((BParameterizedType) target).paramIndex) { return false; } return isAssignable(resolvedSourceType, unifier.build(target), unresolvedTypes); } private boolean isAssignableRecordType(BRecordType recordType, BType type, Set<TypePair> unresolvedTypes) { TypePair pair = new TypePair(recordType, type); if (!unresolvedTypes.add(pair)) { return true; } BType targetType; switch (type.tag) { case TypeTags.MAP: targetType = ((BMapType) type).constraint; break; case TypeTags.JSON: targetType = type; break; default: throw new IllegalArgumentException("Incompatible target type: " + type.toString()); } return recordFieldsAssignableToType(recordType, targetType, unresolvedTypes); } private boolean isAssignableStreamType(BStreamType sourceStreamType, BStreamType targetStreamType, Set<TypePair> unresolvedTypes) { return isAssignable(sourceStreamType.constraint, targetStreamType.constraint, unresolvedTypes) && isAssignable(sourceStreamType.completionType, targetStreamType.completionType, unresolvedTypes); } private boolean recordFieldsAssignableToType(BRecordType recordType, BType targetType, Set<TypePair> unresolvedTypes) { for (BField field : recordType.fields.values()) { if (!isAssignable(field.type, targetType, unresolvedTypes)) { return false; } } if (!recordType.sealed) { return isAssignable(recordType.restFieldType, targetType, unresolvedTypes); } return true; } private boolean isAssignableTableType(BTableType sourceTableType, BTableType targetTableType, Set<TypePair> unresolvedTypes) { if 
(!isAssignable(sourceTableType.constraint, targetTableType.constraint, unresolvedTypes)) { return false; } if (targetTableType.keyTypeConstraint == null && targetTableType.fieldNameList == null) { return true; } if (targetTableType.keyTypeConstraint != null) { if (sourceTableType.keyTypeConstraint != null && (isAssignable(sourceTableType.keyTypeConstraint, targetTableType.keyTypeConstraint, unresolvedTypes))) { return true; } if (sourceTableType.fieldNameList == null) { return false; } List<BType> fieldTypes = new ArrayList<>(); sourceTableType.fieldNameList.stream() .map(f -> getTableConstraintField(sourceTableType.constraint, f)) .filter(Objects::nonNull).map(f -> f.type).forEach(fieldTypes::add); if (fieldTypes.size() == 1) { return isAssignable(fieldTypes.get(0), targetTableType.keyTypeConstraint, unresolvedTypes); } BTupleType tupleType = new BTupleType(fieldTypes); return isAssignable(tupleType, targetTableType.keyTypeConstraint, unresolvedTypes); } return targetTableType.fieldNameList.equals(sourceTableType.fieldNameList); } BField getTableConstraintField(BType constraintType, String fieldName) { switch (constraintType.tag) { case TypeTags.RECORD: Map<String, BField> fieldList = ((BRecordType) constraintType).getFields(); return fieldList.get(fieldName); case TypeTags.UNION: BUnionType unionType = (BUnionType) constraintType; Set<BType> memTypes = unionType.getMemberTypes(); List<BField> fields = memTypes.stream().map(type -> getTableConstraintField(type, fieldName)) .filter(Objects::nonNull).collect(Collectors.toList()); if (fields.size() != memTypes.size()) { return null; } if (fields.stream().allMatch(field -> isAssignable(field.type, fields.get(0).type) && isAssignable(fields.get(0).type, field.type))) { return fields.get(0); } break; case TypeTags.INTERSECTION: return getTableConstraintField(((BIntersectionType) constraintType).effectiveType, fieldName); case TypeTags.TYPEREFDESC: return getTableConstraintField(((BTypeReferenceType) constraintType).referredType, fieldName); } return null; } private boolean isAssignableMapType(BMapType sourceMapType, BRecordType targetRecType) { if (targetRecType.sealed) { return false; } for (BField field : targetRecType.fields.values()) { if (!Symbols.isFlagOn(field.symbol.flags, Flags.OPTIONAL)) { return false; } if (hasIncompatibleReadOnlyFlags(field.symbol.flags, sourceMapType.flags)) { return false; } if (!isAssignable(sourceMapType.constraint, field.type)) { return false; } } return isAssignable(sourceMapType.constraint, targetRecType.restFieldType); } private boolean hasIncompatibleReadOnlyFlags(long targetFlags, long sourceFlags) { return Symbols.isFlagOn(targetFlags, Flags.READONLY) && !Symbols.isFlagOn(sourceFlags, Flags.READONLY); } private boolean isErrorTypeAssignable(BErrorType source, BErrorType target, Set<TypePair> unresolvedTypes) { if (target == symTable.errorType) { return true; } TypePair pair = new TypePair(source, target); if (unresolvedTypes.contains(pair)) { return true; } unresolvedTypes.add(pair); return isAssignable(source.detailType, target.detailType, unresolvedTypes) && target.typeIdSet.isAssignableFrom(source.typeIdSet); } private boolean isXMLTypeAssignable(BType sourceT, BType targetT, Set<TypePair> unresolvedTypes) { BType sourceType = getReferredType(sourceT); BType targetType = getReferredType(targetT); int sourceTag = sourceType.tag; int targetTag = targetType.tag; if (targetTag == TypeTags.XML) { BXMLType target = (BXMLType) targetType; if (target.constraint != null) { if 
(TypeTags.isXMLNonSequenceType(sourceTag)) { return isAssignable(sourceType, target.constraint, unresolvedTypes); } BXMLType source = (BXMLType) sourceType; if (source.constraint.tag == TypeTags.NEVER) { if (sourceTag == targetTag) { return true; } return isAssignable(source, target.constraint, unresolvedTypes); } return isAssignable(source.constraint, target, unresolvedTypes); } return true; } if (sourceTag == TypeTags.XML) { BXMLType source = (BXMLType) sourceType; if (targetTag == TypeTags.XML_TEXT) { if (source.constraint != null) { if (source.constraint.tag == TypeTags.NEVER || source.constraint.tag == TypeTags.XML_TEXT) { return true; } else { return isAssignable(source.constraint, targetType, unresolvedTypes); } } return false; } } return sourceTag == targetTag; } private boolean isTupleTypeAssignable(BType source, BType target, Set<TypePair> unresolvedTypes) { TypePair pair = new TypePair(source, target); if (unresolvedTypes.contains(pair)) { return true; } if (source.tag == TypeTags.TUPLE && ((BTupleType) source).isCyclic) { unresolvedTypes.add(pair); } if (target.tag == TypeTags.JSON && source.tag == TypeTags.TUPLE) { BTupleType rhsTupleType = (BTupleType) source; for (BType tupleType : rhsTupleType.tupleTypes) { if (!isAssignable(tupleType, target, unresolvedTypes)) { return false; } } if (rhsTupleType.restType != null) { return isAssignable(rhsTupleType.restType, target, unresolvedTypes); } return true; } if (source.tag != TypeTags.TUPLE || target.tag != TypeTags.TUPLE) { return false; } BTupleType lhsTupleType = (BTupleType) target; BTupleType rhsTupleType = (BTupleType) source; if (lhsTupleType.restType == null && rhsTupleType.restType != null) { return false; } if (lhsTupleType.restType == null && lhsTupleType.tupleTypes.size() != rhsTupleType.tupleTypes.size()) { return false; } if (lhsTupleType.restType != null && rhsTupleType.restType != null) { if (!isAssignable(rhsTupleType.restType, lhsTupleType.restType, unresolvedTypes)) { return false; } } if (lhsTupleType.tupleTypes.size() > rhsTupleType.tupleTypes.size()) { return false; } for (int i = 0; i < rhsTupleType.tupleTypes.size(); i++) { BType lhsType = (lhsTupleType.tupleTypes.size() > i) ? 
lhsTupleType.tupleTypes.get(i) : lhsTupleType.restType; if (!isAssignable(rhsTupleType.tupleTypes.get(i), lhsType, unresolvedTypes)) { return false; } } return true; } private boolean checkAllTupleMembersBelongNoType(List<BType> tupleTypes) { boolean isNoType = false; for (BType memberType : tupleTypes) { switch (memberType.tag) { case TypeTags.NONE: isNoType = true; break; case TypeTags.TUPLE: isNoType = checkAllTupleMembersBelongNoType(((BTupleType) memberType).tupleTypes); if (!isNoType) { return false; } break; default: return false; } } return isNoType; } private boolean isTupleTypeAssignableToArrayType(BTupleType source, BArrayType target, Set<TypePair> unresolvedTypes) { if (target.state != BArrayState.OPEN && (source.restType != null || source.tupleTypes.size() != target.size)) { return false; } List<BType> sourceTypes = new ArrayList<>(source.tupleTypes); if (source.restType != null) { sourceTypes.add(source.restType); } return sourceTypes.stream() .allMatch(tupleElemType -> isAssignable(tupleElemType, target.eType, unresolvedTypes)); } private boolean isArrayTypeAssignableToTupleType(BArrayType source, BTupleType target, Set<TypePair> unresolvedTypes) { BType restType = target.restType; List<BType> tupleTypes = target.tupleTypes; if (source.state == BArrayState.OPEN) { if (restType == null || !tupleTypes.isEmpty()) { return false; } return isAssignable(source.eType, restType, unresolvedTypes); } int targetTupleMemberSize = tupleTypes.size(); int sourceArraySize = source.size; if (targetTupleMemberSize > sourceArraySize) { return false; } if (restType == null && targetTupleMemberSize < sourceArraySize) { return false; } BType sourceElementType = source.eType; for (BType memType : tupleTypes) { if (!isAssignable(sourceElementType, memType, unresolvedTypes)) { return false; } } if (restType == null) { return true; } return sourceArraySize == targetTupleMemberSize || isAssignable(sourceElementType, restType, unresolvedTypes); } private boolean isArrayTypesAssignable(BArrayType source, BType target, Set<TypePair> unresolvedTypes) { BType sourceElementType = source.getElementType(); if (target.tag == TypeTags.ARRAY) { BArrayType targetArrayType = (BArrayType) target; BType targetElementType = targetArrayType.getElementType(); if (targetArrayType.state == BArrayState.OPEN) { return isAssignable(sourceElementType, targetElementType, unresolvedTypes); } if (targetArrayType.size != source.size) { return false; } return isAssignable(sourceElementType, targetElementType, unresolvedTypes); } else if (target.tag == TypeTags.JSON) { return isAssignable(sourceElementType, target, unresolvedTypes); } else if (target.tag == TypeTags.ANYDATA) { return isAssignable(sourceElementType, target, unresolvedTypes); } return false; } private boolean isFunctionTypeAssignable(BInvokableType source, BInvokableType target, Set<TypePair> unresolvedTypes) { if (hasIncompatibleIsolatedFlags(source, target) || hasIncompatibleTransactionalFlags(source, target)) { return false; } if (Symbols.isFlagOn(target.flags, Flags.ANY_FUNCTION)) { return true; } if (containsTypeParams(target)) { if (source.paramTypes.size() != target.paramTypes.size()) { return false; } for (int i = 0; i < source.paramTypes.size(); i++) { BType sourceParam = source.paramTypes.get(i); BType targetParam = target.paramTypes.get(i); boolean isTypeParam = TypeParamAnalyzer.isTypeParam(targetParam); if (isTypeParam) { if (!isAssignable(sourceParam, targetParam)) { return false; } } else { if (!isAssignable(targetParam, sourceParam)) { return false; 
} } } if (source.retType == null && target.retType == null) { return true; } else if (source.retType == null || target.retType == null) { return false; } return isAssignable(source.retType, target.retType, unresolvedTypes); } return checkFunctionTypeEquality(source, target, unresolvedTypes, (s, t, ut) -> isAssignable(t, s, ut)); } public boolean isInherentlyImmutableType(BType type) { if (isValueType(type)) { return true; } switch (type.tag) { case TypeTags.XML_TEXT: case TypeTags.FINITE: case TypeTags.READONLY: case TypeTags.NIL: case TypeTags.ERROR: case TypeTags.INVOKABLE: case TypeTags.TYPEDESC: case TypeTags.HANDLE: return true; case TypeTags.XML: return ((BXMLType) type).constraint.tag == TypeTags.NEVER; case TypeTags.TYPEREFDESC: return isInherentlyImmutableType(((BTypeReferenceType) type).referredType); } return false; } public BType getReferredType(BType type) { BType constraint = type; if (type.tag == TypeTags.TYPEREFDESC) { constraint = getReferredType(((BTypeReferenceType) type).referredType); } return constraint; } boolean isSelectivelyImmutableType(BType type) { return isSelectivelyImmutableType(type, new HashSet<>(), false); } boolean isSelectivelyImmutableType(BType type, boolean forceCheck) { return isSelectivelyImmutableType(type, new HashSet<>(), forceCheck); } public boolean isSelectivelyImmutableType(BType type, Set<BType> unresolvedTypes) { return isSelectivelyImmutableType(type, unresolvedTypes, false); } private boolean isSelectivelyImmutableType(BType type, Set<BType> unresolvedTypes, boolean forceCheck) { return isSelectivelyImmutableType(type, false, unresolvedTypes, forceCheck); } private boolean isSelectivelyImmutableType(BType input, boolean disallowReadOnlyObjects, Set<BType> unresolvedTypes, boolean forceCheck) { BType type = getReferredType(input); if (isInherentlyImmutableType(type) || !(type instanceof SelectivelyImmutableReferenceType)) { return false; } if (!unresolvedTypes.add(type)) { return true; } if (!forceCheck && ((SelectivelyImmutableReferenceType) type).getImmutableType() != null) { return true; } switch (type.tag) { case TypeTags.ANY: case TypeTags.ANYDATA: case TypeTags.JSON: case TypeTags.XML: case TypeTags.XML_COMMENT: case TypeTags.XML_ELEMENT: case TypeTags.XML_PI: return true; case TypeTags.ARRAY: BType elementType = ((BArrayType) type).eType; return isInherentlyImmutableType(elementType) || isSelectivelyImmutableType(elementType, unresolvedTypes, forceCheck); case TypeTags.TUPLE: BTupleType tupleType = (BTupleType) type; for (BType tupMemType : tupleType.tupleTypes) { if (!isInherentlyImmutableType(tupMemType) && !isSelectivelyImmutableType(tupMemType, unresolvedTypes, forceCheck)) { return false; } } BType tupRestType = tupleType.restType; if (tupRestType == null) { return true; } return isInherentlyImmutableType(tupRestType) || isSelectivelyImmutableType(tupRestType, unresolvedTypes, forceCheck); case TypeTags.RECORD: BRecordType recordType = (BRecordType) type; for (BField field : recordType.fields.values()) { BType fieldType = field.type; if (!isInherentlyImmutableType(fieldType) && !isSelectivelyImmutableType(fieldType, unresolvedTypes, forceCheck)) { return false; } } BType recordRestType = recordType.restFieldType; if (recordRestType == null || recordRestType == symTable.noType) { return true; } return isInherentlyImmutableType(recordRestType) || isSelectivelyImmutableType(recordRestType, unresolvedTypes, forceCheck); case TypeTags.MAP: BType constraintType = ((BMapType) type).constraint; return 
isInherentlyImmutableType(constraintType) || isSelectivelyImmutableType(constraintType, unresolvedTypes, forceCheck); case TypeTags.OBJECT: BObjectType objectType = (BObjectType) type; for (BField field : objectType.fields.values()) { BType fieldType = field.type; if (!isInherentlyImmutableType(fieldType) && !isSelectivelyImmutableType(fieldType, unresolvedTypes, forceCheck)) { return false; } } return true; case TypeTags.TABLE: BType tableConstraintType = ((BTableType) type).constraint; return isInherentlyImmutableType(tableConstraintType) || isSelectivelyImmutableType(tableConstraintType, unresolvedTypes, forceCheck); case TypeTags.UNION: boolean readonlyIntersectionExists = false; for (BType memberType : ((BUnionType) type).getMemberTypes()) { if (isInherentlyImmutableType(memberType) || isSelectivelyImmutableType(memberType, unresolvedTypes, forceCheck)) { readonlyIntersectionExists = true; } } return readonlyIntersectionExists; case TypeTags.INTERSECTION: return isSelectivelyImmutableType(((BIntersectionType) type).effectiveType, unresolvedTypes, forceCheck); case TypeTags.TYPEREFDESC: return isSelectivelyImmutableType(((BTypeReferenceType) type).referredType, unresolvedTypes, forceCheck); } return false; } private boolean containsTypeParams(BInvokableType type) { boolean hasParameterizedTypes = type.paramTypes.stream() .anyMatch(t -> { if (t.tag == TypeTags.FUNCTION_POINTER) { return containsTypeParams((BInvokableType) t); } return TypeParamAnalyzer.isTypeParam(t); }); if (hasParameterizedTypes) { return hasParameterizedTypes; } if (type.retType.tag == TypeTags.FUNCTION_POINTER) { return containsTypeParams((BInvokableType) type.retType); } return TypeParamAnalyzer.isTypeParam(type.retType); } private boolean isSameFunctionType(BInvokableType source, BInvokableType target, Set<TypePair> unresolvedTypes) { return checkFunctionTypeEquality(source, target, unresolvedTypes, this::isSameType); } private boolean checkFunctionTypeEquality(BInvokableType source, BInvokableType target, Set<TypePair> unresolvedTypes, TypeEqualityPredicate equality) { if (hasIncompatibleIsolatedFlags(source, target) || hasIncompatibleTransactionalFlags(source, target)) { return false; } if (Symbols.isFlagOn(target.flags, Flags.ANY_FUNCTION) && Symbols.isFlagOn(source.flags, Flags.ANY_FUNCTION)) { return true; } if (Symbols.isFlagOn(target.flags, Flags.ANY_FUNCTION) || Symbols.isFlagOn(source.flags, Flags.ANY_FUNCTION)) { return false; } if (source.paramTypes.size() != target.paramTypes.size()) { return false; } for (int i = 0; i < source.paramTypes.size(); i++) { if (!equality.test(source.paramTypes.get(i), target.paramTypes.get(i), unresolvedTypes)) { return false; } } if ((source.restType != null && target.restType == null) || target.restType != null && source.restType == null) { return false; } else if (source.restType != null && !equality.test(source.restType, target.restType, unresolvedTypes)) { return false; } if (source.retType == null && target.retType == null) { return true; } else if (source.retType == null || target.retType == null) { return false; } return isAssignable(source.retType, target.retType, unresolvedTypes); } private boolean hasIncompatibleIsolatedFlags(BInvokableType source, BInvokableType target) { return Symbols.isFlagOn(target.flags, Flags.ISOLATED) && !Symbols.isFlagOn(source.flags, Flags.ISOLATED); } private boolean hasIncompatibleTransactionalFlags(BInvokableType source, BInvokableType target) { return Symbols.isFlagOn(source.flags, Flags.TRANSACTIONAL) && 
!Symbols.isFlagOn(target.flags, Flags.TRANSACTIONAL); } public boolean isSameArrayType(BType source, BType target, Set<TypePair> unresolvedTypes) { if (target.tag != TypeTags.ARRAY || source.tag != TypeTags.ARRAY) { return false; } BArrayType lhsArrayType = (BArrayType) target; BArrayType rhsArrayType = (BArrayType) source; boolean hasSameTypeElements = isSameType(lhsArrayType.eType, rhsArrayType.eType, unresolvedTypes); if (lhsArrayType.state == BArrayState.OPEN) { return (rhsArrayType.state == BArrayState.OPEN) && hasSameTypeElements; } return checkSealedArraySizeEquality(rhsArrayType, lhsArrayType) && hasSameTypeElements; } public boolean isSameStreamType(BType source, BType target, Set<TypePair> unresolvedTypes) { if (target.tag != TypeTags.STREAM || source.tag != TypeTags.STREAM) { return false; } BStreamType lhsStreamType = (BStreamType) target; BStreamType rhsStreamType = (BStreamType) source; return isSameType(lhsStreamType.constraint, rhsStreamType.constraint, unresolvedTypes) && isSameType(lhsStreamType.completionType, rhsStreamType.completionType, unresolvedTypes); } public boolean checkSealedArraySizeEquality(BArrayType rhsArrayType, BArrayType lhsArrayType) { return lhsArrayType.size == rhsArrayType.size; } public boolean checkStructEquivalency(BType rhsType, BType lhsType) { return checkStructEquivalency(rhsType, lhsType, new HashSet<>()); } private boolean checkStructEquivalency(BType rhsType, BType lhsType, Set<TypePair> unresolvedTypes) { TypePair pair = new TypePair(rhsType, lhsType); if (unresolvedTypes.contains(pair)) { return true; } unresolvedTypes.add(pair); if (rhsType.tag == TypeTags.OBJECT && lhsType.tag == TypeTags.OBJECT) { return checkObjectEquivalency((BObjectType) rhsType, (BObjectType) lhsType, unresolvedTypes); } if (rhsType.tag == TypeTags.RECORD && lhsType.tag == TypeTags.RECORD) { return checkRecordEquivalency((BRecordType) rhsType, (BRecordType) lhsType, unresolvedTypes); } return false; } public boolean checkObjectEquivalency(BObjectType rhsType, BObjectType lhsType, Set<TypePair> unresolvedTypes) { if (Symbols.isFlagOn(lhsType.flags, Flags.ISOLATED) && !Symbols.isFlagOn(rhsType.flags, Flags.ISOLATED)) { return false; } BObjectTypeSymbol lhsStructSymbol = (BObjectTypeSymbol) lhsType.tsymbol; BObjectTypeSymbol rhsStructSymbol = (BObjectTypeSymbol) rhsType.tsymbol; List<BAttachedFunction> lhsFuncs = lhsStructSymbol.attachedFuncs; List<BAttachedFunction> rhsFuncs = ((BObjectTypeSymbol) rhsType.tsymbol).attachedFuncs; int lhsAttachedFuncCount = getObjectFuncCount(lhsStructSymbol); int rhsAttachedFuncCount = getObjectFuncCount(rhsStructSymbol); boolean isLhsAService = Symbols.isService(lhsStructSymbol); if (isLhsAService && !Symbols.isService(rhsStructSymbol)) { return false; } if (lhsType.fields.size() > rhsType.fields.size() || lhsAttachedFuncCount > rhsAttachedFuncCount) { return false; } for (BField bField : lhsType.fields.values()) { if (Symbols.isPrivate(bField.symbol)) { return false; } } for (BAttachedFunction func : lhsFuncs) { if (Symbols.isPrivate(func.symbol)) { return false; } } for (BField lhsField : lhsType.fields.values()) { BField rhsField = rhsType.fields.get(lhsField.name.value); if (rhsField == null || !isInSameVisibilityRegion(lhsField.symbol, rhsField.symbol) || !isAssignable(rhsField.type, lhsField.type, unresolvedTypes)) { return false; } } for (BAttachedFunction lhsFunc : lhsFuncs) { if (lhsFunc == lhsStructSymbol.initializerFunc) { continue; } if (isLhsAService && Symbols.isResource(lhsFunc.symbol)) { continue; } BAttachedFunction 
rhsFunc = getMatchingInvokableType(rhsFuncs, lhsFunc, unresolvedTypes); if (rhsFunc == null || !isInSameVisibilityRegion(lhsFunc.symbol, rhsFunc.symbol)) { return false; } if (Symbols.isRemote(lhsFunc.symbol) != Symbols.isRemote(rhsFunc.symbol)) { return false; } } return lhsType.typeIdSet.isAssignableFrom(rhsType.typeIdSet); } private int getObjectFuncCount(BObjectTypeSymbol sym) { int count = 0; for (BAttachedFunction attachedFunc : sym.attachedFuncs) { if (!Symbols.isResource(attachedFunc.symbol)) { count++; } } if (sym.initializerFunc != null && sym.attachedFuncs.contains(sym.initializerFunc)) { return count - 1; } return count; } public boolean checkRecordEquivalency(BRecordType rhsType, BRecordType lhsType, Set<TypePair> unresolvedTypes) { if (lhsType.sealed && !rhsType.sealed) { return false; } if (!rhsType.sealed && !isAssignable(rhsType.restFieldType, lhsType.restFieldType, unresolvedTypes)) { return false; } return checkFieldEquivalency(lhsType, rhsType, unresolvedTypes); } public void setForeachTypedBindingPatternType(BLangForeach foreachNode) { BType collectionType = getReferredType(foreachNode.collection.getBType()); BType varType; switch (collectionType.tag) { case TypeTags.STRING: varType = symTable.charStringType; break; case TypeTags.ARRAY: BArrayType arrayType = (BArrayType) collectionType; varType = arrayType.eType; break; case TypeTags.TUPLE: BTupleType tupleType = (BTupleType) collectionType; LinkedHashSet<BType> tupleTypes = new LinkedHashSet<>(tupleType.tupleTypes); if (tupleType.restType != null) { tupleTypes.add(tupleType.restType); } varType = tupleTypes.size() == 1 ? tupleTypes.iterator().next() : BUnionType.create(null, tupleTypes); break; case TypeTags.MAP: BMapType bMapType = (BMapType) collectionType; varType = bMapType.constraint; break; case TypeTags.RECORD: BRecordType recordType = (BRecordType) collectionType; varType = inferRecordFieldType(recordType); break; case TypeTags.XML: BType constraint = getReferredType(((BXMLType) collectionType).constraint); while (constraint.tag == TypeTags.XML) { collectionType = constraint; constraint = ((BXMLType) collectionType).constraint; } switch (constraint.tag) { case TypeTags.XML_ELEMENT: varType = symTable.xmlElementType; break; case TypeTags.XML_COMMENT: varType = symTable.xmlCommentType; break; case TypeTags.XML_TEXT: varType = symTable.xmlTextType; break; case TypeTags.XML_PI: varType = symTable.xmlPIType; break; case TypeTags.NEVER: varType = symTable.neverType; break; default: Set<BType> collectionTypes = getEffectiveMemberTypes((BUnionType) constraint); Set<BType> builtinXMLConstraintTypes = getEffectiveMemberTypes ((BUnionType) ((BXMLType) symTable.xmlType).constraint); if (collectionTypes.size() == 4 && builtinXMLConstraintTypes.equals(collectionTypes)) { varType = symTable.xmlType; } else { LinkedHashSet<BType> collectionTypesInSymTable = new LinkedHashSet<>(); for (BType subType : collectionTypes) { switch (subType.tag) { case TypeTags.XML_ELEMENT: collectionTypesInSymTable.add(symTable.xmlElementType); break; case TypeTags.XML_COMMENT: collectionTypesInSymTable.add(symTable.xmlCommentType); break; case TypeTags.XML_TEXT: collectionTypesInSymTable.add(symTable.xmlTextType); break; case TypeTags.XML_PI: collectionTypesInSymTable.add(symTable.xmlPIType); break; } } varType = BUnionType.create(null, collectionTypesInSymTable); } } break; case TypeTags.XML_TEXT: varType = symTable.xmlTextType; break; case TypeTags.TABLE: BTableType tableType = (BTableType) collectionType; varType = tableType.constraint; 
break; case TypeTags.STREAM: BStreamType streamType = (BStreamType) collectionType; if (streamType.constraint.tag == TypeTags.NONE) { varType = symTable.anydataType; break; } varType = streamType.constraint; List<BType> completionType = getAllTypes(streamType.completionType, true); if (completionType.stream().anyMatch(type -> type.tag != TypeTags.NIL)) { BType actualType = BUnionType.create(null, varType, streamType.completionType); dlog.error(foreachNode.collection.pos, DiagnosticErrorCode.INCOMPATIBLE_TYPES, varType, actualType); } break; case TypeTags.OBJECT: BUnionType nextMethodReturnType = getVarTypeFromIterableObject((BObjectType) collectionType); if (nextMethodReturnType != null) { foreachNode.resultType = getRecordType(nextMethodReturnType); BType valueType = (foreachNode.resultType != null) ? ((BRecordType) foreachNode.resultType).fields.get("value").type : null; BType errorType = getErrorType(nextMethodReturnType); if (errorType != null) { BType actualType = BUnionType.create(null, valueType, errorType); dlog.error(foreachNode.collection.pos, DiagnosticErrorCode.INVALID_ITERABLE_COMPLETION_TYPE_IN_FOREACH_NEXT_FUNCTION, actualType, errorType); } foreachNode.nillableResultType = nextMethodReturnType; foreachNode.varType = valueType; return; } case TypeTags.SEMANTIC_ERROR: foreachNode.varType = symTable.semanticError; foreachNode.resultType = symTable.semanticError; foreachNode.nillableResultType = symTable.semanticError; return; default: foreachNode.varType = symTable.semanticError; foreachNode.resultType = symTable.semanticError; foreachNode.nillableResultType = symTable.semanticError; dlog.error(foreachNode.collection.pos, DiagnosticErrorCode.ITERABLE_NOT_SUPPORTED_COLLECTION, collectionType); return; } BInvokableSymbol iteratorSymbol = (BInvokableSymbol) symResolver.lookupLangLibMethod(collectionType, names.fromString(BLangCompilerConstants.ITERABLE_COLLECTION_ITERATOR_FUNC)); BObjectType objectType = (BObjectType) getReferredType(iteratorSymbol.retType); BUnionType nextMethodReturnType = (BUnionType) getResultTypeOfNextInvocation(objectType); foreachNode.varType = varType; foreachNode.resultType = getRecordType(nextMethodReturnType); foreachNode.nillableResultType = nextMethodReturnType; } public void setInputClauseTypedBindingPatternType(BLangInputClause bLangInputClause) { if (bLangInputClause.collection == null) { return; } BType collectionType = bLangInputClause.collection.getBType(); BType varType = visitCollectionType(bLangInputClause, collectionType); if (varType.tag == TypeTags.SEMANTIC_ERROR) { return; } BInvokableSymbol iteratorSymbol = (BInvokableSymbol) symResolver.lookupLangLibMethod(collectionType, names.fromString(BLangCompilerConstants.ITERABLE_COLLECTION_ITERATOR_FUNC)); BUnionType nextMethodReturnType = (BUnionType) getResultTypeOfNextInvocation((BObjectType) getReferredType(iteratorSymbol.retType)); bLangInputClause.varType = varType; bLangInputClause.resultType = getRecordType(nextMethodReturnType); bLangInputClause.nillableResultType = nextMethodReturnType; } private BType visitCollectionType(BLangInputClause bLangInputClause, BType collectionType) { switch (collectionType.tag) { case TypeTags.STRING: return symTable.stringType; case TypeTags.ARRAY: BArrayType arrayType = (BArrayType) collectionType; return arrayType.eType; case TypeTags.TUPLE: BTupleType tupleType = (BTupleType) collectionType; LinkedHashSet<BType> tupleTypes = new LinkedHashSet<>(tupleType.tupleTypes); if (tupleType.restType != null) { tupleTypes.add(tupleType.restType); } return 
tupleTypes.size() == 1 ? tupleTypes.iterator().next() : BUnionType.create(null, tupleTypes); case TypeTags.MAP: BMapType bMapType = (BMapType) collectionType; return bMapType.constraint; case TypeTags.RECORD: BRecordType recordType = (BRecordType) collectionType; return inferRecordFieldType(recordType); case TypeTags.XML: BXMLType xmlType = (BXMLType) collectionType; return xmlType.constraint; case TypeTags.XML_TEXT: return symTable.xmlTextType; case TypeTags.TABLE: BTableType tableType = (BTableType) collectionType; return tableType.constraint; case TypeTags.STREAM: BStreamType streamType = (BStreamType) collectionType; if (streamType.constraint.tag == TypeTags.NONE) { return symTable.anydataType; } return streamType.constraint; case TypeTags.OBJECT: if (!isAssignable(collectionType, symTable.iterableType)) { dlog.error(bLangInputClause.collection.pos, DiagnosticErrorCode.INVALID_ITERABLE_OBJECT_TYPE, bLangInputClause.collection.getBType(), symTable.iterableType); bLangInputClause.varType = symTable.semanticError; bLangInputClause.resultType = symTable.semanticError; bLangInputClause.nillableResultType = symTable.semanticError; break; } BUnionType nextMethodReturnType = getVarTypeFromIterableObject((BObjectType) collectionType); if (nextMethodReturnType != null) { bLangInputClause.resultType = getRecordType(nextMethodReturnType); bLangInputClause.nillableResultType = nextMethodReturnType; bLangInputClause.varType = ((BRecordType) bLangInputClause.resultType).fields.get("value").type; break; } case TypeTags.SEMANTIC_ERROR: bLangInputClause.varType = symTable.semanticError; bLangInputClause.resultType = symTable.semanticError; bLangInputClause.nillableResultType = symTable.semanticError; break; case TypeTags.TYPEREFDESC: return visitCollectionType(bLangInputClause, getReferredType(collectionType)); default: bLangInputClause.varType = symTable.semanticError; bLangInputClause.resultType = symTable.semanticError; bLangInputClause.nillableResultType = symTable.semanticError; dlog.error(bLangInputClause.collection.pos, DiagnosticErrorCode.ITERABLE_NOT_SUPPORTED_COLLECTION, collectionType); } return symTable.semanticError; } public BUnionType getVarTypeFromIterableObject(BObjectType collectionType) { BObjectTypeSymbol objectTypeSymbol = (BObjectTypeSymbol) collectionType.tsymbol; for (BAttachedFunction func : objectTypeSymbol.attachedFuncs) { if (func.funcName.value.equals(BLangCompilerConstants.ITERABLE_COLLECTION_ITERATOR_FUNC)) { return getVarTypeFromIteratorFunc(func); } } return null; } private BUnionType getVarTypeFromIteratorFunc(BAttachedFunction candidateIteratorFunc) { if (!candidateIteratorFunc.type.paramTypes.isEmpty()) { return null; } BType returnType = candidateIteratorFunc.type.retType; return getVarTypeFromIteratorFuncReturnType(returnType); } public BUnionType getVarTypeFromIteratorFuncReturnType(BType type) { BObjectTypeSymbol objectTypeSymbol; BType returnType = getReferredType(type); if (returnType.tag != TypeTags.OBJECT) { return null; } objectTypeSymbol = (BObjectTypeSymbol) returnType.tsymbol; for (BAttachedFunction func : objectTypeSymbol.attachedFuncs) { if (func.funcName.value.equals(BLangCompilerConstants.NEXT_FUNC)) { return getVarTypeFromNextFunc(func); } } return null; } private BUnionType getVarTypeFromNextFunc(BAttachedFunction nextFunc) { BType returnType; if (!nextFunc.type.paramTypes.isEmpty()) { return null; } returnType = nextFunc.type.retType; if (checkNextFuncReturnType(returnType)) { return (BUnionType) returnType; } return null; } private boolean 
checkNextFuncReturnType(BType returnType) { if (returnType.tag != TypeTags.UNION) { return false; } List<BType> types = getAllTypes(returnType, true); boolean containsCompletionType = types.removeIf(type -> type.tag == TypeTags.NIL); containsCompletionType = types.removeIf(type -> type.tag == TypeTags.ERROR) || containsCompletionType; if (!containsCompletionType) { return false; } if (types.size() != 1) { return false; } if (types.get(0).tag != TypeTags.RECORD) { return false; } BRecordType recordType = (BRecordType) types.get(0); return checkRecordTypeInNextFuncReturnType(recordType); } private boolean checkRecordTypeInNextFuncReturnType(BRecordType recordType) { if (!recordType.sealed) { return false; } if (recordType.fields.size() != 1) { return false; } return recordType.fields.containsKey(BLangCompilerConstants.VALUE_FIELD); } private BRecordType getRecordType(BUnionType type) { for (BType member : type.getMemberTypes()) { BType referredRecordType = getReferredType(member); if (referredRecordType.tag == TypeTags.RECORD) { return (BRecordType) referredRecordType; } } return null; } public BErrorType getErrorType(BUnionType type) { for (BType member : type.getMemberTypes()) { member = getEffectiveTypeForIntersection(getReferredType(member)); if (member.tag == TypeTags.ERROR) { return (BErrorType) member; } else if (member.tag == TypeTags.UNION) { BErrorType e = getErrorType((BUnionType) member); if (e != null) { return e; } } } return null; } public BType getResultTypeOfNextInvocation(BObjectType iteratorType) { BAttachedFunction nextFunc = getAttachedFuncFromObject(iteratorType, BLangCompilerConstants.NEXT_FUNC); return Objects.requireNonNull(nextFunc).type.retType; } public BAttachedFunction getAttachedFuncFromObject(BObjectType objectType, String funcName) { BObjectTypeSymbol iteratorSymbol = (BObjectTypeSymbol) objectType.tsymbol; for (BAttachedFunction bAttachedFunction : iteratorSymbol.attachedFuncs) { if (funcName.equals(bAttachedFunction.funcName.value)) { return bAttachedFunction; } } return null; } public BType inferRecordFieldType(BRecordType recordType) { Map<String, BField> fields = recordType.fields; BUnionType unionType = BUnionType.create(null); if (!recordType.sealed) { unionType.add(recordType.restFieldType); } else if (fields.size() == 0) { unionType.add(symTable.neverType); } for (BField field : fields.values()) { if (isAssignable(field.type, unionType)) { continue; } if (isAssignable(unionType, field.type)) { unionType = BUnionType.create(null); } unionType.add(field.type); } if (unionType.getMemberTypes().size() > 1) { unionType.tsymbol = Symbols.createTypeSymbol(SymTag.UNION_TYPE, Flags.asMask(EnumSet.of(Flag.PUBLIC)), Names.EMPTY, recordType.tsymbol.pkgID, null, recordType.tsymbol.owner, symTable.builtinPos, VIRTUAL); return unionType; } return unionType.getMemberTypes().iterator().next(); } public BType getTypeWithEffectiveIntersectionTypes(BType bType) { BType type = getReferredType(bType); BType effectiveType = null; if (type.tag == TypeTags.INTERSECTION) { effectiveType = ((BIntersectionType) type).effectiveType; type = effectiveType; } if (type.tag != TypeTags.UNION) { return Objects.requireNonNullElse(effectiveType, bType); } LinkedHashSet<BType> members = new LinkedHashSet<>(); boolean hasDifferentMember = false; for (BType memberType : ((BUnionType) type).getMemberTypes()) { effectiveType = getTypeWithEffectiveIntersectionTypes(memberType); effectiveType = getReferredType(effectiveType); if (effectiveType != memberType) { hasDifferentMember = true; } 
members.add(effectiveType); } if (hasDifferentMember) { return BUnionType.create(null, members); } return bType; } /** * Enum to represent type test result. * * @since 1.2.0 */ enum TypeTestResult { NOT_FOUND, TRUE, FALSE } TypeTestResult isBuiltInTypeWidenPossible(BType actualType, BType targetType) { int targetTag = getReferredType(targetType).tag; int actualTag = getReferredType(actualType).tag; if (actualTag < TypeTags.JSON && targetTag < TypeTags.JSON) { switch (actualTag) { case TypeTags.INT: case TypeTags.BYTE: case TypeTags.FLOAT: case TypeTags.DECIMAL: if (targetTag == TypeTags.BOOLEAN || targetTag == TypeTags.STRING) { return TypeTestResult.FALSE; } break; case TypeTags.BOOLEAN: if (targetTag == TypeTags.INT || targetTag == TypeTags.BYTE || targetTag == TypeTags.FLOAT || targetTag == TypeTags.DECIMAL || targetTag == TypeTags.STRING) { return TypeTestResult.FALSE; } break; case TypeTags.STRING: if (targetTag == TypeTags.INT || targetTag == TypeTags.BYTE || targetTag == TypeTags.FLOAT || targetTag == TypeTags.DECIMAL || targetTag == TypeTags.BOOLEAN) { return TypeTestResult.FALSE; } break; } } switch (actualTag) { case TypeTags.INT: case TypeTags.BYTE: case TypeTags.FLOAT: case TypeTags.DECIMAL: case TypeTags.BOOLEAN: case TypeTags.STRING: case TypeTags.SIGNED32_INT: case TypeTags.SIGNED16_INT: case TypeTags.SIGNED8_INT: case TypeTags.UNSIGNED32_INT: case TypeTags.UNSIGNED16_INT: case TypeTags.UNSIGNED8_INT: case TypeTags.CHAR_STRING: if (targetTag == TypeTags.JSON || targetTag == TypeTags.ANYDATA || targetTag == TypeTags.ANY || targetTag == TypeTags.READONLY) { return TypeTestResult.TRUE; } break; case TypeTags.ANYDATA: case TypeTags.TYPEDESC: if (targetTag == TypeTags.ANY) { return TypeTestResult.TRUE; } break; default: } if (TypeTags.isIntegerTypeTag(targetTag) && actualTag == targetTag) { return TypeTestResult.FALSE; } if ((TypeTags.isIntegerTypeTag(actualTag) || actualTag == TypeTags.BYTE) && (TypeTags.isIntegerTypeTag(targetTag) || targetTag == TypeTags.BYTE)) { return checkBuiltInIntSubtypeWidenPossible(actualType, targetType); } if (actualTag == TypeTags.CHAR_STRING && TypeTags.STRING == targetTag) { return TypeTestResult.TRUE; } return TypeTestResult.NOT_FOUND; } public boolean isImplicitlyCastable(BType actual, BType target) { /* The word Builtin refers to compiler-known types.
*/ BType targetType = getReferredType(target); BType actualType = getReferredType(actual); BType newTargetType = targetType; int targetTypeTag = targetType.tag; if ((targetTypeTag == TypeTags.UNION || targetTypeTag == TypeTags.FINITE) && isValueType(actualType)) { newTargetType = symTable.anyType; } else if (targetTypeTag == TypeTags.INTERSECTION) { newTargetType = ((BIntersectionType) targetType).effectiveType; } TypeTestResult result = isBuiltInTypeWidenPossible(actualType, newTargetType); if (result != TypeTestResult.NOT_FOUND) { return result == TypeTestResult.TRUE; } if (isValueType(targetType) && (actualType.tag == TypeTags.FINITE || (actualType.tag == TypeTags.UNION && ((BUnionType) actualType).getMemberTypes().stream() .anyMatch(type -> type.tag == TypeTags.FINITE && isAssignable(type, targetType))))) { return TypeTags.isIntegerTypeTag(targetTypeTag) || targetType.tag == TypeTags.BYTE || targetTypeTag == TypeTags.FLOAT || targetTypeTag == TypeTags.DECIMAL || TypeTags.isStringTypeTag(targetTypeTag) || targetTypeTag == TypeTags.BOOLEAN; } else if (isValueType(targetType) && actualType.tag == TypeTags.UNION && ((BUnionType) actualType).getMemberTypes().stream().allMatch(type -> isAssignable(type, targetType))) { return true; } else if (targetTypeTag == TypeTags.ERROR && (actualType.tag == TypeTags.UNION && isAllErrorMembers((BUnionType) actualType))) { return true; } return false; } public boolean isTypeCastable(BLangExpression expr, BType source, BType target, SymbolEnv env) { BType sourceType = getReferredType(source); BType targetType = getReferredType(target); if (sourceType.tag == TypeTags.SEMANTIC_ERROR || targetType.tag == TypeTags.SEMANTIC_ERROR || sourceType == targetType) { return true; } IntersectionContext intersectionContext = IntersectionContext.compilerInternalIntersectionTestContext(); BType errorIntersection = getTypeIntersection(intersectionContext, sourceType, symTable.errorType, env); if (errorIntersection != symTable.semanticError && getTypeIntersection(intersectionContext, symTable.errorType, targetType, env) == symTable.semanticError) { return false; } if (isAssignable(sourceType, targetType) || isAssignable(targetType, sourceType)) { return true; } if (isNumericConversionPossible(expr, sourceType, targetType)) { return true; } if (sourceType.tag == TypeTags.ANY && targetType.tag == TypeTags.READONLY) { return true; } boolean validTypeCast = false; if (sourceType instanceof BUnionType) { if (getTypeForUnionTypeMembersAssignableToType((BUnionType) sourceType, targetType, env, intersectionContext, new LinkedHashSet<>()) != symTable.semanticError) { validTypeCast = true; } } if (targetType instanceof BUnionType) { if (getTypeForUnionTypeMembersAssignableToType((BUnionType) targetType, sourceType, env, intersectionContext, new LinkedHashSet<>()) != symTable.semanticError) { validTypeCast = true; } } if (sourceType.tag == TypeTags.FINITE) { if (getTypeForFiniteTypeValuesAssignableToType((BFiniteType) sourceType, targetType) != symTable.semanticError) { validTypeCast = true; } } if (targetType.tag == TypeTags.FINITE) { if (getTypeForFiniteTypeValuesAssignableToType((BFiniteType) targetType, sourceType) != symTable.semanticError) { validTypeCast = true; } } if (validTypeCast) { if (isValueType(sourceType)) { setImplicitCastExpr(expr, sourceType, symTable.anyType); } return true; } return false; } boolean isNumericConversionPossible(BLangExpression expr, BType sourceType, BType targetType) { final boolean isSourceNumericType = isBasicNumericType(sourceType); final 
boolean isTargetNumericType = isBasicNumericType(targetType); if (isSourceNumericType && isTargetNumericType) { return true; } if (targetType.tag == TypeTags.UNION) { HashSet<Integer> typeTags = new HashSet<>(); for (BType bType : ((BUnionType) targetType).getMemberTypes()) { if (isBasicNumericType(bType)) { typeTags.add(bType.tag); if (typeTags.size() > 1) { return false; } } } } if (!isTargetNumericType && targetType.tag != TypeTags.UNION) { return false; } if (isSourceNumericType) { setImplicitCastExpr(expr, sourceType, symTable.anyType); return true; } switch (sourceType.tag) { case TypeTags.ANY: case TypeTags.ANYDATA: case TypeTags.JSON: return true; case TypeTags.UNION: for (BType memType : ((BUnionType) sourceType).getMemberTypes()) { BType referredType = getReferredType(memType); if (isBasicNumericType(referredType) || (referredType.tag == TypeTags.FINITE && finiteTypeContainsNumericTypeValues((BFiniteType) referredType))) { return true; } } break; case TypeTags.FINITE: if (finiteTypeContainsNumericTypeValues((BFiniteType) sourceType)) { return true; } break; } return false; } private boolean isAllErrorMembers(BUnionType actualType) { return actualType.getMemberTypes().stream().allMatch(t -> isAssignable(t, symTable.errorType)); } public void setImplicitCastExpr(BLangExpression expr, BType actualType, BType targetType) { BType expType = getReferredType(targetType); if (!isImplicitlyCastable(actualType, expType)) { return; } BLangTypeConversionExpr implicitConversionExpr = (BLangTypeConversionExpr) TreeBuilder.createTypeConversionNode(); implicitConversionExpr.pos = expr.pos; implicitConversionExpr.expr = expr.impConversionExpr == null ? expr : expr.impConversionExpr; implicitConversionExpr.setBType(expType); implicitConversionExpr.targetType = expType; implicitConversionExpr.internal = true; expr.impConversionExpr = implicitConversionExpr; } public BType getElementType(BType type) { if (type.tag != TypeTags.ARRAY) { return type; } return getElementType(((BArrayType) type).getElementType()); } public boolean checkListenerCompatibilityAtServiceDecl(BType type) { if (type.tag == TypeTags.UNION) { int listenerCompatibleTypeCount = 0; for (BType memberType : ((BUnionType) type).getMemberTypes()) { if (memberType.tag != TypeTags.ERROR) { if (!checkListenerCompatibility(memberType)) { return false; } listenerCompatibleTypeCount++; } } return listenerCompatibleTypeCount > 0; } return checkListenerCompatibility(type); } public boolean checkListenerCompatibility(BType bType) { BType type = getReferredType(bType); if (type.tag == TypeTags.UNION) { BUnionType unionType = (BUnionType) type; for (BType memberType : unionType.getMemberTypes()) { if (!checkListenerCompatibility(memberType)) { return false; } } return true; } if (type.tag != TypeTags.OBJECT) { return false; } BObjectType rhsType = (BObjectType) type; List<BAttachedFunction> rhsFuncs = ((BStructureTypeSymbol) rhsType.tsymbol).attachedFuncs; ListenerValidationModel listenerValidationModel = new ListenerValidationModel(this, symTable); return listenerValidationModel.checkMethods(rhsFuncs); } public boolean isValidErrorDetailType(BType detailType) { switch (detailType.tag) { case TypeTags.TYPEREFDESC: return isValidErrorDetailType(((BTypeReferenceType) detailType).referredType); case TypeTags.MAP: case TypeTags.RECORD: return isAssignable(detailType, symTable.detailType); } return false; } private boolean isSealedRecord(BType recordType) { return recordType.getKind() == TypeKind.RECORD && ((BRecordType) recordType).sealed; } private 
boolean isNullable(BType fieldType) { return fieldType.isNullable(); } private class BSameTypeVisitor implements BTypeVisitor<BType, Boolean> { Set<TypePair> unresolvedTypes; BSameTypeVisitor(Set<TypePair> unresolvedTypes) { this.unresolvedTypes = unresolvedTypes; } @Override public Boolean visit(BType target, BType source) { BType t = getReferredType(target); BType s = getReferredType(source); if (t == s) { return true; } switch (t.tag) { case TypeTags.INT: case TypeTags.BYTE: case TypeTags.FLOAT: case TypeTags.DECIMAL: case TypeTags.STRING: case TypeTags.BOOLEAN: return t.tag == s.tag && ((TypeParamAnalyzer.isTypeParam(t) || TypeParamAnalyzer.isTypeParam(s)) || (t.tag == TypeTags.TYPEREFDESC || s.tag == TypeTags.TYPEREFDESC)); case TypeTags.ANY: case TypeTags.ANYDATA: return t.tag == s.tag && hasSameReadonlyFlag(s, t) && (TypeParamAnalyzer.isTypeParam(t) || TypeParamAnalyzer.isTypeParam(s)); default: break; } return false; } @Override public Boolean visit(BBuiltInRefType t, BType s) { return t == s; } @Override public Boolean visit(BAnyType t, BType s) { return t == s; } @Override public Boolean visit(BAnydataType t, BType s) { if (t == s) { return true; } return t.tag == s.tag; } @Override public Boolean visit(BMapType t, BType s) { if (s.tag != TypeTags.MAP || !hasSameReadonlyFlag(s, t)) { return false; } BMapType sType = ((BMapType) s); return isSameType(sType.constraint, t.constraint, this.unresolvedTypes); } @Override public Boolean visit(BFutureType t, BType s) { return s.tag == TypeTags.FUTURE && isSameType(t.constraint, ((BFutureType) s).constraint, this.unresolvedTypes); } @Override public Boolean visit(BXMLType t, BType s) { return visit((BBuiltInRefType) t, s); } @Override public Boolean visit(BJSONType t, BType s) { return s.tag == TypeTags.JSON && hasSameReadonlyFlag(s, t); } @Override public Boolean visit(BArrayType t, BType s) { return s.tag == TypeTags.ARRAY && hasSameReadonlyFlag(s, t) && isSameArrayType(s, t, this.unresolvedTypes); } @Override public Boolean visit(BObjectType t, BType s) { if (t == s) { return true; } if (s.tag != TypeTags.OBJECT) { return false; } return t.tsymbol.pkgID.equals(s.tsymbol.pkgID) && t.tsymbol.name.equals(s.tsymbol.name); } @Override public Boolean visit(BRecordType t, BType s) { if (t == s) { return true; } if (s.tag != TypeTags.RECORD || !hasSameReadonlyFlag(s, t)) { return false; } BRecordType source = (BRecordType) s; if (source.fields.size() != t.fields.size()) { return false; } for (BField sourceField : source.fields.values()) { if (t.fields.containsKey(sourceField.name.value)) { BField targetField = t.fields.get(sourceField.name.value); if (isSameType(sourceField.type, targetField.type, this.unresolvedTypes) && hasSameOptionalFlag(sourceField.symbol, targetField.symbol) && (!Symbols.isFlagOn(targetField.symbol.flags, Flags.READONLY) || Symbols.isFlagOn(sourceField.symbol.flags, Flags.READONLY))) { continue; } } return false; } return isSameType(source.restFieldType, t.restFieldType, this.unresolvedTypes); } private boolean hasSameOptionalFlag(BVarSymbol s, BVarSymbol t) { return ((s.flags & Flags.OPTIONAL) ^ (t.flags & Flags.OPTIONAL)) != Flags.OPTIONAL; } private boolean hasSameReadonlyFlag(BType source, BType target) { return Symbols.isFlagOn(target.flags, Flags.READONLY) == Symbols.isFlagOn(source.flags, Flags.READONLY); } public Boolean visit(BTupleType t, BType s) { if (((!t.tupleTypes.isEmpty() && checkAllTupleMembersBelongNoType(t.tupleTypes)) || (t.restType != null && t.restType.tag == TypeTags.NONE)) && !(s.tag == 
TypeTags.ARRAY && ((BArrayType) s).state == BArrayState.OPEN)) { return true; } if (s.tag != TypeTags.TUPLE || !hasSameReadonlyFlag(s, t)) { return false; } BTupleType source = (BTupleType) s; if (source.tupleTypes.size() != t.tupleTypes.size()) { return false; } BType sourceRestType = source.restType; BType targetRestType = t.restType; if ((sourceRestType == null || targetRestType == null) && sourceRestType != targetRestType) { return false; } for (int i = 0; i < source.tupleTypes.size(); i++) { if (t.getTupleTypes().get(i) == symTable.noType) { continue; } if (!isSameType(source.getTupleTypes().get(i), t.tupleTypes.get(i), this.unresolvedTypes)) { return false; } } if (sourceRestType == null || targetRestType == symTable.noType) { return true; } return isSameType(sourceRestType, targetRestType, this.unresolvedTypes); } @Override public Boolean visit(BStreamType t, BType s) { return s.tag == TypeTags.STREAM && isSameStreamType(s, t, this.unresolvedTypes); } @Override public Boolean visit(BTableType t, BType s) { return t == s; } @Override public Boolean visit(BInvokableType t, BType s) { return s.tag == TypeTags.INVOKABLE && isSameFunctionType((BInvokableType) s, t, this.unresolvedTypes); } @Override public Boolean visit(BUnionType tUnionType, BType s) { if (s.tag != TypeTags.UNION || !hasSameReadonlyFlag(s, tUnionType)) { return false; } BUnionType sUnionType = (BUnionType) s; if (sUnionType.getMemberTypes().size() != tUnionType.getMemberTypes().size()) { return false; } Set<BType> sourceTypes = new LinkedHashSet<>(sUnionType.getMemberTypes().size()); Set<BType> targetTypes = new LinkedHashSet<>(tUnionType.getMemberTypes().size()); sourceTypes.add(sUnionType); sourceTypes.addAll(sUnionType.getMemberTypes()); targetTypes.add(tUnionType); targetTypes.addAll(tUnionType.getMemberTypes()); boolean notSameType = sourceTypes .stream() .map(sT -> targetTypes .stream() .anyMatch(it -> isSameType(it, sT, this.unresolvedTypes))) .anyMatch(foundSameType -> !foundSameType); return !notSameType; } @Override public Boolean visit(BIntersectionType tIntersectionType, BType s) { if (s.tag != TypeTags.INTERSECTION || !hasSameReadonlyFlag(s, tIntersectionType)) { return false; } BIntersectionType sIntersectionType = (BIntersectionType) s; if (sIntersectionType.getConstituentTypes().size() != tIntersectionType.getConstituentTypes().size()) { return false; } Set<BType> sourceTypes = new LinkedHashSet<>(sIntersectionType.getConstituentTypes()); Set<BType> targetTypes = new LinkedHashSet<>(tIntersectionType.getConstituentTypes()); for (BType sourceType : sourceTypes) { boolean foundSameType = false; for (BType targetType : targetTypes) { if (isSameType(sourceType, targetType, this.unresolvedTypes)) { foundSameType = true; break; } } if (!foundSameType) { return false; } } return true; } @Override public Boolean visit(BErrorType t, BType s) { if (s.tag != TypeTags.ERROR) { return false; } BErrorType source = (BErrorType) s; if (!source.typeIdSet.equals(t.typeIdSet)) { return false; } if (source.detailType == t.detailType) { return true; } return isSameType(source.detailType, t.detailType, this.unresolvedTypes); } @Override public Boolean visit(BTypedescType t, BType s) { if (s.tag != TypeTags.TYPEDESC) { return false; } BTypedescType sType = ((BTypedescType) s); return isSameType(sType.constraint, t.constraint, this.unresolvedTypes); } @Override public Boolean visit(BFiniteType t, BType s) { return s == t; } @Override public Boolean visit(BParameterizedType t, BType s) { if (s.tag != TypeTags.PARAMETERIZED_TYPE) 
{ return false; } BParameterizedType sType = (BParameterizedType) s; return isSameType(sType.paramValueType, t.paramValueType) && sType.paramSymbol.equals(t.paramSymbol); } public Boolean visit(BTypeReferenceType t, BType s) { return isSameType(getReferredType(t), s); } }; private class BOrderedTypeVisitor implements BTypeVisitor<BType, Boolean> { Set<TypePair> unresolvedTypes; BOrderedTypeVisitor(Set<TypePair> unresolvedTypes) { this.unresolvedTypes = unresolvedTypes; } @Override public Boolean visit(BType target, BType source) { BType sourceType = getReferredType(source); BType targetType = getReferredType(target); int sourceTag = sourceType.tag; int targetTag = targetType.tag; if (sourceTag == TypeTags.INTERSECTION || targetTag == TypeTags.INTERSECTION) { sourceTag = getEffectiveTypeForIntersection(sourceType).tag; targetTag = getEffectiveTypeForIntersection(targetType).tag; } if (isSimpleBasicType(sourceTag) && isSimpleBasicType(targetTag)) { return (source == target) || isIntOrStringType(sourceTag, targetTag); } if (sourceTag == TypeTags.FINITE) { return checkValueSpaceHasSameType(((BFiniteType) sourceType), targetType); } return isSameOrderedType(targetType, sourceType, this.unresolvedTypes); } @Override public Boolean visit(BArrayType target, BType source) { if (source.tag != TypeTags.ARRAY) { return false; } BArrayType rhsArrayType = (BArrayType) source; boolean hasSameOrderedTypeElements = isSameOrderedType(target.eType, rhsArrayType.eType, unresolvedTypes); if (target.state == BArrayState.OPEN) { return (rhsArrayType.state == BArrayState.OPEN) && hasSameOrderedTypeElements; } return hasSameOrderedTypeElements; } @Override public Boolean visit(BTupleType target, BType source) { if (source.tag != TypeTags.TUPLE || !hasSameReadonlyFlag(source, target)) { return false; } BTupleType sourceT = (BTupleType) source; BType sourceRestType = sourceT.restType; BType targetRestType = target.restType; int sourceTupleCount = sourceT.tupleTypes.size(); int targetTupleCount = target.tupleTypes.size(); int len = Math.min(sourceTupleCount, targetTupleCount); for (int i = 0; i < len; i++) { if (!isSameOrderedType(sourceT.getTupleTypes().get(i), target.tupleTypes.get(i), this.unresolvedTypes)) { return false; } } if (sourceTupleCount == targetTupleCount) { if (sourceRestType == null || targetRestType == null) { return true; } return isSameOrderedType(sourceRestType, targetRestType, this.unresolvedTypes); } if (sourceTupleCount > targetTupleCount) { return checkSameOrderedTypeInTuples(sourceT, sourceTupleCount, targetTupleCount, sourceRestType, targetRestType); } return checkSameOrderedTypeInTuples(target, targetTupleCount, sourceTupleCount, targetRestType, sourceRestType); } private boolean checkSameOrderedTypeInTuples(BTupleType source, int sourceTupleCount, int targetTupleCount, BType sourceRestType, BType targetRestType) { if (targetRestType == null) { return true; } for (int i = targetTupleCount; i < sourceTupleCount; i++) { if (!isSameOrderedType(source.getTupleTypes().get(i), targetRestType, this.unresolvedTypes)) { return false; } } if (sourceRestType == null) { return true; } return isSameOrderedType(sourceRestType, targetRestType, this.unresolvedTypes); } @Override public Boolean visit(BUnionType target, BType source) { if (source.tag != TypeTags.UNION || !hasSameReadonlyFlag(source, target)) { return checkUnionHasSameType(target.getMemberTypes(), source); } BUnionType sUnionType = (BUnionType) source; LinkedHashSet<BType> sourceTypes = sUnionType.getMemberTypes(); LinkedHashSet<BType> 
targetTypes = target.getMemberTypes(); if (checkUnionHasAllFiniteOrNilMembers(sourceTypes) && checkUnionHasAllFiniteOrNilMembers(targetTypes)) { if (sourceTypes.contains(symTable.nilType) != targetTypes.contains(symTable.nilType)) { return false; } return checkValueSpaceHasSameType(((BFiniteType) target.getMemberTypes().iterator().next()), sUnionType.getMemberTypes().iterator().next()); } if (sUnionType.getMemberTypes().size() != target.getMemberTypes().size()) { return false; } return checkSameOrderedTypesInUnionMembers(sourceTypes, targetTypes); } private boolean checkSameOrderedTypesInUnionMembers(LinkedHashSet<BType> sourceTypes, LinkedHashSet<BType> targetTypes) { for (BType sourceT : sourceTypes) { boolean foundSameOrderedType = false; for (BType targetT : targetTypes) { if (isSameOrderedType(targetT, sourceT, this.unresolvedTypes)) { foundSameOrderedType = true; break; } } if (!foundSameOrderedType) { return false; } } return true; } @Override public Boolean visit(BFiniteType t, BType s) { return checkValueSpaceHasSameType(t, s); } private boolean hasSameReadonlyFlag(BType source, BType target) { return Symbols.isFlagOn(target.flags, Flags.READONLY) == Symbols.isFlagOn(source.flags, Flags.READONLY); } @Override public Boolean visit(BBuiltInRefType t, BType s) { return false; } @Override public Boolean visit(BAnyType t, BType s) { return false; } @Override public Boolean visit(BAnydataType t, BType s) { return false; } @Override public Boolean visit(BMapType t, BType s) { return false; } @Override public Boolean visit(BFutureType t, BType s) { return false; } @Override public Boolean visit(BXMLType t, BType s) { return false; } @Override public Boolean visit(BJSONType t, BType s) { return false; } @Override public Boolean visit(BObjectType t, BType s) { return false; } @Override public Boolean visit(BRecordType t, BType s) { return false; } @Override public Boolean visit(BStreamType t, BType s) { return false; } @Override public Boolean visit(BTableType t, BType s) { return false; } @Override public Boolean visit(BInvokableType t, BType s) { return false; } @Override public Boolean visit(BIntersectionType tIntersectionType, BType s) { return this.visit(getEffectiveTypeForIntersection(tIntersectionType), s); } @Override public Boolean visit(BErrorType t, BType s) { return false; } @Override public Boolean visit(BTypedescType t, BType s) { return false; } public Boolean visit(BTypeReferenceType t, BType s) { return this.visit(getReferredType(t), t); } @Override public Boolean visit(BParameterizedType t, BType s) { return false; } }; private boolean checkUnionHasSameType(LinkedHashSet<BType> memberTypes, BType baseType) { boolean isSameType = false; for (BType type : memberTypes) { type = getReferredType(type); if (type.tag == TypeTags.FINITE) { for (BLangExpression expr : ((BFiniteType) type).getValueSpace()) { isSameType = isSameOrderedType(expr.getBType(), baseType); if (!isSameType) { return false; } } } else if (type.tag == TypeTags.UNION) { return checkUnionHasSameType((LinkedHashSet<BType>) ((UnionType) type).getMemberTypes(), baseType); } else if (isSimpleBasicType(type.tag)) { isSameType = isSameOrderedType(type, baseType); if (!isSameType) { return false; } } } return isSameType; } private boolean checkValueSpaceHasSameType(BFiniteType finiteType, BType type) { BType baseType = getReferredType(type); if (baseType.tag == TypeTags.FINITE) { BType baseExprType = finiteType.getValueSpace().iterator().next().getBType(); return checkValueSpaceHasSameType(((BFiniteType) baseType), 
baseExprType); } boolean isValueSpaceSameType = false; for (BLangExpression expr : finiteType.getValueSpace()) { isValueSpaceSameType = isSameOrderedType(expr.getBType(), baseType); if (!isValueSpaceSameType) { break; } } return isValueSpaceSameType; } private boolean checkUnionHasAllFiniteOrNilMembers(LinkedHashSet<BType> memberTypes) { for (BType type : memberTypes) { if (type.tag != TypeTags.FINITE && type.tag != TypeTags.NIL) { return false; } } return true; } private boolean checkFieldEquivalency(BRecordType lhsType, BRecordType rhsType, Set<TypePair> unresolvedTypes) { Map<String, BField> rhsFields = new LinkedHashMap<>(rhsType.fields); for (BField lhsField : lhsType.fields.values()) { BField rhsField = rhsFields.get(lhsField.name.value); if (rhsField == null) { if (!Symbols.isOptional(lhsField.symbol)) { return false; } continue; } if (hasIncompatibleReadOnlyFlags(lhsField.symbol.flags, rhsField.symbol.flags)) { return false; } if (!Symbols.isOptional(lhsField.symbol) && Symbols.isOptional(rhsField.symbol)) { return false; } if (!isAssignable(rhsField.type, lhsField.type, unresolvedTypes)) { return false; } rhsFields.remove(lhsField.name.value); } if (lhsType.sealed) { for (BField field : rhsFields.values()) { if (!isNeverTypeOrStructureTypeWithARequiredNeverMember(field.type)) { return false; } } return true; } BType lhsRestFieldType = lhsType.restFieldType; for (BField field : rhsFields.values()) { if (!isAssignable(field.type, lhsRestFieldType, unresolvedTypes)) { return false; } } return true; } private BAttachedFunction getMatchingInvokableType(List<BAttachedFunction> rhsFuncList, BAttachedFunction lhsFunc, Set<TypePair> unresolvedTypes) { return rhsFuncList.stream() .filter(rhsFunc -> lhsFunc.funcName.equals(rhsFunc.funcName)) .filter(rhsFunc -> isFunctionTypeAssignable(rhsFunc.type, lhsFunc.type, unresolvedTypes)) .findFirst() .orElse(null); } private boolean isInSameVisibilityRegion(BSymbol lhsSym, BSymbol rhsSym) { if (Symbols.isPrivate(lhsSym)) { return Symbols.isPrivate(rhsSym) && lhsSym.pkgID.equals(rhsSym.pkgID) && lhsSym.owner.name.equals(rhsSym.owner.name); } else if (Symbols.isPublic(lhsSym)) { return Symbols.isPublic(rhsSym); } return !Symbols.isPrivate(rhsSym) && !Symbols.isPublic(rhsSym) && lhsSym.pkgID.equals(rhsSym.pkgID); } private boolean isAssignableToUnionType(BType source, BType target, Set<TypePair> unresolvedTypes) { TypePair pair = new TypePair(source, target); if (unresolvedTypes.contains(pair)) { return true; } if (source.tag == TypeTags.UNION && ((BUnionType) source).isCyclic) { unresolvedTypes.add(pair); } Set<BType> sourceTypes = new LinkedHashSet<>(); Set<BType> targetTypes = new LinkedHashSet<>(); if (source.tag == TypeTags.UNION || source.tag == TypeTags.JSON || source.tag == TypeTags.ANYDATA) { sourceTypes.addAll(getEffectiveMemberTypes((BUnionType) source)); } else { sourceTypes.add(source); } boolean targetIsAUnion = false; if (target.tag == TypeTags.UNION) { targetIsAUnion = true; targetTypes.addAll(getEffectiveMemberTypes((BUnionType) target)); } else { targetTypes.add(target); } var sourceIterator = sourceTypes.iterator(); while (sourceIterator.hasNext()) { BType sMember = sourceIterator.next(); if (sMember.tag == TypeTags.NEVER) { sourceIterator.remove(); continue; } if (sMember.tag == TypeTags.FINITE && isAssignable(sMember, target, unresolvedTypes)) { sourceIterator.remove(); continue; } if (sMember.tag == TypeTags.XML && isAssignableToUnionType(expandedXMLBuiltinSubtypes, target, unresolvedTypes)) { sourceIterator.remove(); continue; } 
if (!isValueType(sMember)) { if (!targetIsAUnion) { continue; } BUnionType targetUnion = (BUnionType) target; if (sMember instanceof BUnionType) { BUnionType sUnion = (BUnionType) sMember; if (sUnion.isCyclic && targetUnion.isCyclic) { unresolvedTypes.add(new TypePair(sUnion, targetUnion)); if (isAssignable(sUnion, targetUnion, unresolvedTypes)) { sourceIterator.remove(); continue; } } if (sMember.tag == TypeTags.JSON && isAssignable(sUnion, targetUnion, unresolvedTypes)) { sourceIterator.remove(); continue; } } if (sMember.tag == TypeTags.READONLY) { unresolvedTypes.add(new TypePair(sMember, targetUnion)); if (isAssignable(sMember, targetUnion, unresolvedTypes)) { sourceIterator.remove(); continue; } } continue; } boolean sourceTypeIsNotAssignableToAnyTargetType = true; var targetIterator = targetTypes.iterator(); while (targetIterator.hasNext()) { BType t = targetIterator.next(); if (isAssignable(sMember, t, unresolvedTypes)) { sourceIterator.remove(); sourceTypeIsNotAssignableToAnyTargetType = false; break; } } if (sourceTypeIsNotAssignableToAnyTargetType) { return false; } } sourceIterator = sourceTypes.iterator(); while (sourceIterator.hasNext()) { BType sourceMember = sourceIterator.next(); boolean sourceTypeIsNotAssignableToAnyTargetType = true; var targetIterator = targetTypes.iterator(); boolean selfReferencedSource = (sourceMember != source) && isSelfReferencedStructuredType(source, sourceMember); while (targetIterator.hasNext()) { BType targetMember = targetIterator.next(); boolean selfReferencedTarget = isSelfReferencedStructuredType(target, targetMember); if (selfReferencedTarget && selfReferencedSource && (sourceMember.tag == targetMember.tag)) { sourceTypeIsNotAssignableToAnyTargetType = false; break; } if (isAssignable(sourceMember, targetMember, unresolvedTypes)) { sourceTypeIsNotAssignableToAnyTargetType = false; break; } } if (sourceTypeIsNotAssignableToAnyTargetType) { return false; } } unresolvedTypes.add(pair); return true; } public boolean isSelfReferencedStructuredType(BType source, BType s) { if (source == s) { return true; } if (s.tag == TypeTags.ARRAY) { return isSelfReferencedStructuredType(source, ((BArrayType) s).eType); } if (s.tag == TypeTags.MAP) { return isSelfReferencedStructuredType(source, ((BMapType) s).constraint); } if (s.tag == TypeTags.TABLE) { return isSelfReferencedStructuredType(source, ((BTableType) s).constraint); } return false; } public BType updateSelfReferencedWithNewType(BType source, BType s, BType target) { if (s.tag == TypeTags.ARRAY) { BArrayType arrayType = (BArrayType) s; if (arrayType.eType == source) { return new BArrayType(target, arrayType.tsymbol, arrayType.size, arrayType.state, arrayType.flags); } } if (s.tag == TypeTags.MAP) { BMapType mapType = (BMapType) s; if (mapType.constraint == source) { return new BMapType(mapType.tag, target, mapType.tsymbol, mapType.flags); } } if (s.tag == TypeTags.TABLE) { BTableType tableType = (BTableType) s; if (tableType.constraint == source) { return new BTableType(tableType.tag, target, tableType.tsymbol, tableType.flags); } else if (tableType.constraint instanceof BMapType) { return updateSelfReferencedWithNewType(source, (BMapType) tableType.constraint, target); } } return s; } public static void fixSelfReferencingSameUnion(BType originalMemberType, BUnionType origUnionType, BType immutableMemberType, BUnionType newImmutableUnion, LinkedHashSet<BType> readOnlyMemTypes) { boolean sameMember = originalMemberType == immutableMemberType; if (originalMemberType.tag == TypeTags.ARRAY) { var 
arrayType = (BArrayType) originalMemberType; if (origUnionType == arrayType.eType) { if (sameMember) { BArrayType newArrayType = new BArrayType(newImmutableUnion, arrayType.tsymbol, arrayType.size, arrayType.state, arrayType.flags); readOnlyMemTypes.add(newArrayType); } else { ((BArrayType) immutableMemberType).eType = newImmutableUnion; readOnlyMemTypes.add(immutableMemberType); } } } else if (originalMemberType.tag == TypeTags.MAP) { var mapType = (BMapType) originalMemberType; if (origUnionType == mapType.constraint) { if (sameMember) { BMapType newMapType = new BMapType(mapType.tag, newImmutableUnion, mapType.tsymbol, mapType.flags); readOnlyMemTypes.add(newMapType); } else { ((BMapType) immutableMemberType).constraint = newImmutableUnion; readOnlyMemTypes.add(immutableMemberType); } } } else if (originalMemberType.tag == TypeTags.TABLE) { var tableType = (BTableType) originalMemberType; if (origUnionType == tableType.constraint) { if (sameMember) { BTableType newTableType = new BTableType(tableType.tag, newImmutableUnion, tableType.tsymbol, tableType.flags); readOnlyMemTypes.add(newTableType); } else { ((BTableType) immutableMemberType).constraint = newImmutableUnion; readOnlyMemTypes.add(immutableMemberType); } return; } var immutableConstraint = ((BTableType) immutableMemberType).constraint; if (tableType.constraint.tag == TypeTags.MAP) { sameMember = tableType.constraint == immutableConstraint; var mapType = (BMapType) tableType.constraint; if (origUnionType == mapType.constraint) { if (sameMember) { BMapType newMapType = new BMapType(mapType.tag, newImmutableUnion, mapType.tsymbol, mapType.flags); ((BTableType) immutableMemberType).constraint = newMapType; } else { ((BTableType) immutableMemberType).constraint = newImmutableUnion; } readOnlyMemTypes.add(immutableMemberType); } } } else { readOnlyMemTypes.add(immutableMemberType); } } private Set<BType> getEffectiveMemberTypes(BUnionType unionType) { Set<BType> memTypes = new LinkedHashSet<>(); for (BType memberType : unionType.getMemberTypes()) { switch (memberType.tag) { case TypeTags.INTERSECTION: BType effectiveType = ((BIntersectionType) memberType).effectiveType; BType refType = getReferredType(effectiveType); if (refType.tag == TypeTags.UNION) { memTypes.addAll(getEffectiveMemberTypes((BUnionType) refType)); continue; } if (refType.tag == TypeTags.INTERSECTION) { memTypes.addAll( getEffectiveMemberTypes((BUnionType) ((BIntersectionType) refType).effectiveType)); continue; } memTypes.add(effectiveType); break; case TypeTags.UNION: memTypes.addAll(getEffectiveMemberTypes((BUnionType) memberType)); break; case TypeTags.TYPEREFDESC: BType constraint = getReferredType(memberType); if (constraint.tag == TypeTags.UNION) { memTypes.addAll(getEffectiveMemberTypes((BUnionType) constraint)); continue; } memTypes.add(constraint); break; default: memTypes.add(memberType); break; } } return memTypes; } private boolean isFiniteTypeAssignable(BFiniteType finiteType, BType targetType, Set<TypePair> unresolvedTypes) { BType expType = getReferredType(targetType); if (expType.tag == TypeTags.FINITE) { return finiteType.getValueSpace().stream() .allMatch(expression -> isAssignableToFiniteType(expType, (BLangLiteral) expression)); } if (targetType.tag == TypeTags.UNION) { List<BType> unionMemberTypes = getAllTypes(targetType, true); for (BLangExpression valueExpr : finiteType.getValueSpace()) { if (unionMemberTypes.stream() .noneMatch(targetMemType -> getReferredType(targetMemType).tag == TypeTags.FINITE ? 
isAssignableToFiniteType(targetMemType, (BLangLiteral) valueExpr) : isAssignable(valueExpr.getBType(), targetMemType, unresolvedTypes) || isLiteralCompatibleWithBuiltinTypeWithSubTypes( (BLangLiteral) valueExpr, targetMemType))) { return false; } } return true; } for (BLangExpression expression : finiteType.getValueSpace()) { if (!isLiteralCompatibleWithBuiltinTypeWithSubTypes((BLangLiteral) expression, targetType) && !isAssignable(expression.getBType(), expType, unresolvedTypes)) { return false; } } return true; } boolean isAssignableToFiniteType(BType type, BLangLiteral literalExpr) { type = getReferredType(type); if (type.tag != TypeTags.FINITE) { return false; } BFiniteType expType = (BFiniteType) type; return expType.getValueSpace().stream().anyMatch(memberLiteral -> { if (((BLangLiteral) memberLiteral).value == null) { return literalExpr.value == null; } return checkLiteralAssignabilityBasedOnType((BLangLiteral) memberLiteral, literalExpr); }); } /** * Method to check the literal assignability based on the types of the literals. For numeric literals the * assignability depends on the equivalency of the literals. The candidate literal could either be a simple * literal or a constant. In case of a constant, it is assignable to the base literal if and only if both * literals have the same type and equivalent values. * * @param baseLiteral Literal based on which we check the assignability. * @param candidateLiteral Literal to be tested whether it is assignable to the base literal or not. * @return true if assignable; false otherwise. */ boolean checkLiteralAssignabilityBasedOnType(BLangLiteral baseLiteral, BLangLiteral candidateLiteral) { if (baseLiteral.getKind() != candidateLiteral.getKind()) { return false; } Object baseValue = baseLiteral.value; Object candidateValue = candidateLiteral.value; int candidateTypeTag = candidateLiteral.getBType().tag; switch (baseLiteral.getBType().tag) { case TypeTags.BYTE: if (candidateTypeTag == TypeTags.BYTE || (candidateTypeTag == TypeTags.INT && !candidateLiteral.isConstant && isByteLiteralValue((Long) candidateValue))) { return ((Number) baseValue).longValue() == ((Number) candidateValue).longValue(); } break; case TypeTags.INT: if (candidateTypeTag == TypeTags.INT) { return ((Number) baseValue).longValue() == ((Number) candidateValue).longValue(); } break; case TypeTags.SIGNED32_INT: if (candidateTypeTag == TypeTags.INT && isSigned32LiteralValue((Long) candidateValue)) { return ((Number) baseValue).longValue() == ((Number) candidateValue).longValue(); } break; case TypeTags.SIGNED16_INT: if (candidateTypeTag == TypeTags.INT && isSigned16LiteralValue((Long) candidateValue)) { return ((Number) baseValue).longValue() == ((Number) candidateValue).longValue(); } break; case TypeTags.SIGNED8_INT: if (candidateTypeTag == TypeTags.INT && isSigned8LiteralValue((Long) candidateValue)) { return ((Number) baseValue).longValue() == ((Number) candidateValue).longValue(); } break; case TypeTags.UNSIGNED32_INT: if (candidateTypeTag == TypeTags.INT && isUnsigned32LiteralValue((Long) candidateValue)) { return ((Number) baseValue).longValue() == ((Number) candidateValue).longValue(); } break; case TypeTags.UNSIGNED16_INT: if (candidateTypeTag == TypeTags.INT && isUnsigned16LiteralValue((Long) candidateValue)) { return ((Number) baseValue).longValue() == ((Number) candidateValue).longValue(); } break; case TypeTags.UNSIGNED8_INT: if (candidateTypeTag == TypeTags.INT && isUnsigned8LiteralValue((Long) candidateValue)) { return ((Number) baseValue).longValue() == 
((Number) candidateValue).longValue(); } break; case TypeTags.FLOAT: String baseValueStr = String.valueOf(baseValue); String originalValue = baseLiteral.originalValue != null ? baseLiteral.originalValue : baseValueStr; if (NumericLiteralSupport.isDecimalDiscriminated(originalValue)) { return false; } double baseDoubleVal = Double.parseDouble(baseValueStr); double candidateDoubleVal; if (candidateTypeTag == TypeTags.INT && !candidateLiteral.isConstant) { candidateDoubleVal = ((Long) candidateValue).doubleValue(); return baseDoubleVal == candidateDoubleVal; } else if (candidateTypeTag == TypeTags.FLOAT) { candidateDoubleVal = Double.parseDouble(String.valueOf(candidateValue)); return baseDoubleVal == candidateDoubleVal; } break; case TypeTags.DECIMAL: BigDecimal baseDecimalVal = NumericLiteralSupport.parseBigDecimal(baseValue); BigDecimal candidateDecimalVal; if (candidateTypeTag == TypeTags.INT && !candidateLiteral.isConstant) { candidateDecimalVal = new BigDecimal((long) candidateValue, MathContext.DECIMAL128); return baseDecimalVal.compareTo(candidateDecimalVal) == 0; } else if (candidateTypeTag == TypeTags.FLOAT && !candidateLiteral.isConstant || candidateTypeTag == TypeTags.DECIMAL) { if (NumericLiteralSupport.isFloatDiscriminated(String.valueOf(candidateValue))) { return false; } candidateDecimalVal = NumericLiteralSupport.parseBigDecimal(candidateValue); return baseDecimalVal.compareTo(candidateDecimalVal) == 0; } break; default: return baseValue.equals(candidateValue); } return false; } boolean isByteLiteralValue(Long longObject) { return (longObject.intValue() >= BBYTE_MIN_VALUE && longObject.intValue() <= BBYTE_MAX_VALUE); } boolean isSigned32LiteralValue(Long longObject) { return (longObject >= SIGNED32_MIN_VALUE && longObject <= SIGNED32_MAX_VALUE); } boolean isSigned16LiteralValue(Long longObject) { return (longObject.intValue() >= SIGNED16_MIN_VALUE && longObject.intValue() <= SIGNED16_MAX_VALUE); } boolean isSigned8LiteralValue(Long longObject) { return (longObject.intValue() >= SIGNED8_MIN_VALUE && longObject.intValue() <= SIGNED8_MAX_VALUE); } boolean isUnsigned32LiteralValue(Long longObject) { return (longObject >= 0 && longObject <= UNSIGNED32_MAX_VALUE); } boolean isUnsigned16LiteralValue(Long longObject) { return (longObject.intValue() >= 0 && longObject.intValue() <= UNSIGNED16_MAX_VALUE); } boolean isUnsigned8LiteralValue(Long longObject) { return (longObject.intValue() >= 0 && longObject.intValue() <= UNSIGNED8_MAX_VALUE); } boolean isCharLiteralValue(String literal) { return (literal.codePoints().count() == 1); } /** * Method to retrieve a type representing all the values in the value space of a finite type that are assignable to * the target type. 
* * @param finiteType the finite type * @param targetType the target type * @return a new finite type if at least one value in the value space of the specified finiteType is * assignable to targetType (the same if all are assignable), else semanticError */ BType getTypeForFiniteTypeValuesAssignableToType(BFiniteType finiteType, BType targetType) { if (isAssignable(finiteType, targetType)) { return finiteType; } Set<BLangExpression> matchingValues = new HashSet<>(); for (BLangExpression expr : finiteType.getValueSpace()) { BLangLiteral literal = (BLangLiteral) expr; if (isAssignable(expr.getBType(), targetType) || isAssignableToFiniteType(targetType, literal) || isAssignableToFiniteTypeMemberInUnion(literal, targetType) || isAssignableToBuiltinSubtypeInTargetType(literal, targetType)) { matchingValues.add(expr); } } if (matchingValues.isEmpty()) { return symTable.semanticError; } BTypeSymbol finiteTypeSymbol = Symbols.createTypeSymbol(SymTag.FINITE_TYPE, finiteType.tsymbol.flags, names.fromString("$anonType$" + UNDERSCORE + finiteTypeCount++), finiteType.tsymbol.pkgID, null, finiteType.tsymbol.owner, finiteType.tsymbol.pos, VIRTUAL); BFiniteType intersectingFiniteType = new BFiniteType(finiteTypeSymbol, matchingValues); finiteTypeSymbol.type = intersectingFiniteType; return intersectingFiniteType; } private boolean isAssignableToFiniteTypeMemberInUnion(BLangLiteral expr, BType targetType) { if (targetType.tag != TypeTags.UNION) { return false; } for (BType memType : ((BUnionType) targetType).getMemberTypes()) { if (isAssignableToFiniteType(memType, expr)) { return true; } } return false; } private boolean isAssignableToBuiltinSubtypeInTargetType(BLangLiteral literal, BType targetType) { if (targetType.tag == TypeTags.UNION) { for (BType memberType : ((BUnionType) targetType).getMemberTypes()) { if (isLiteralCompatibleWithBuiltinTypeWithSubTypes(literal, memberType)) { return true; } } } return isLiteralCompatibleWithBuiltinTypeWithSubTypes(literal, targetType); } public boolean isLiteralCompatibleWithBuiltinTypeWithSubTypes(BLangLiteral literal, BType targetType) { BType literalType = literal.getBType(); if (literalType.tag == targetType.tag) { return true; } switch (targetType.tag) { case TypeTags.BYTE: return literalType.tag == TypeTags.INT && isByteLiteralValue((Long) literal.value); case TypeTags.SIGNED32_INT: return literalType.tag == TypeTags.INT && isSigned32LiteralValue((Long) literal.value); case TypeTags.SIGNED16_INT: return literalType.tag == TypeTags.INT && isSigned16LiteralValue((Long) literal.value); case TypeTags.SIGNED8_INT: return literalType.tag == TypeTags.INT && isSigned8LiteralValue((Long) literal.value); case TypeTags.UNSIGNED32_INT: return literalType.tag == TypeTags.INT && isUnsigned32LiteralValue((Long) literal.value); case TypeTags.UNSIGNED16_INT: return literalType.tag == TypeTags.INT && isUnsigned16LiteralValue((Long) literal.value); case TypeTags.UNSIGNED8_INT: return literalType.tag == TypeTags.INT && isUnsigned8LiteralValue((Long) literal.value); case TypeTags.CHAR_STRING: return literalType.tag == TypeTags.STRING && isCharLiteralValue((String) literal.value); case TypeTags.TYPEREFDESC: return isLiteralCompatibleWithBuiltinTypeWithSubTypes(literal, getReferredType(targetType)); default: return false; } } /** * Method to retrieve a type representing all the member types of a union type that are assignable to * the target type. 
* * @param unionType the union type * @param targetType the target type * @param env the symbol environment * @param intersectionContext the intersection context used when computing member intersections * @param visitedTypes cache to capture visited types * @return a single type or a new union type if at least one member type of the union type is * assignable to targetType, else semanticError */ BType getTypeForUnionTypeMembersAssignableToType(BUnionType unionType, BType targetType, SymbolEnv env, IntersectionContext intersectionContext, LinkedHashSet<BType> visitedTypes) { List<BType> intersection = new LinkedList<>(); if (!visitedTypes.add(unionType)) { return unionType; } unionType.getMemberTypes().forEach(memType -> { BType memberIntersectionType = getTypeIntersection(intersectionContext, memType, targetType, env, visitedTypes); if (memberIntersectionType != symTable.semanticError) { intersection.add(memberIntersectionType); } }); if (intersection.isEmpty()) { return symTable.semanticError; } if (intersection.size() == 1) { return intersection.get(0); } else { return BUnionType.create(null, new LinkedHashSet<>(intersection)); } } boolean validEqualityIntersectionExists(BType lhsType, BType rhsType) { if (!isAnydata(lhsType) && !isAnydata(rhsType)) { return false; } if (isAssignable(lhsType, rhsType) || isAssignable(rhsType, lhsType)) { return true; } Set<BType> lhsTypes = expandAndGetMemberTypesRecursive(lhsType); Set<BType> rhsTypes = expandAndGetMemberTypesRecursive(rhsType); return equalityIntersectionExists(lhsTypes, rhsTypes); } private boolean equalityIntersectionExists(Set<BType> lhsTypes, Set<BType> rhsTypes) { if ((lhsTypes.contains(symTable.anydataType) && rhsTypes.stream().anyMatch(type -> type.tag != TypeTags.ERROR)) || (rhsTypes.contains(symTable.anydataType) && lhsTypes.stream().anyMatch(type -> type.tag != TypeTags.ERROR))) { return true; } boolean matchFound = false; for (BType lhsType : lhsTypes) { for (BType rhsType : rhsTypes) { if (isAssignable(lhsType, rhsType) || isAssignable(rhsType, lhsType)) { matchFound = true; break; } } if (matchFound) { break; } } if (!matchFound) { matchFound = equalityIntersectionExistsForComplexTypes(lhsTypes, rhsTypes); } return matchFound; } boolean validNumericTypeExists(BType type) { if (type.isNullable() && type.tag != TypeTags.NIL) { type = getSafeType(type, true, false); } if (isBasicNumericType(type)) { return true; } switch (type.tag) { case TypeTags.UNION: BUnionType unionType = (BUnionType) type; Set<BType> memberTypes = unionType.getMemberTypes(); BType firstTypeInUnion = getReferredType(memberTypes.iterator().next()); if (firstTypeInUnion.tag == TypeTags.FINITE) { Set<BLangExpression> valSpace = ((BFiniteType) firstTypeInUnion).getValueSpace(); BType baseExprType = valSpace.iterator().next().getBType(); for (BType memType : memberTypes) { if (memType.tag == TypeTags.FINITE) { if (!checkValueSpaceHasSameType((BFiniteType) memType, baseExprType)) { return false; } } if (!checkValidNumericTypesInUnion(memType, firstTypeInUnion.tag)) { return false; } } } else { for (BType memType : memberTypes) { memType = getReferredType(memType); if (!checkValidNumericTypesInUnion(memType, firstTypeInUnion.tag)) { return false; } } } return true; case TypeTags.FINITE: Set<BLangExpression> valSpace = ((BFiniteType) type).getValueSpace(); BType baseExprType = valSpace.iterator().next().getBType(); for (BLangExpression expr : valSpace) { if (!checkValueSpaceHasSameType((BFiniteType) type, baseExprType)) { return false; } if (!validNumericTypeExists(expr.getBType())) { return false; } } return true; case TypeTags.TYPEREFDESC: return 
validNumericTypeExists(getReferredType(type)); case TypeTags.INTERSECTION: return validNumericTypeExists(((BIntersectionType) type).effectiveType); default: return false; } } private boolean checkValidNumericTypesInUnion(BType memType, int firstTypeTag) { if (memType.tag != firstTypeTag && !checkTypesBelongToInt(memType.tag, firstTypeTag)) { return false; } return validNumericTypeExists(memType); } private boolean checkTypesBelongToInt(int firstTypeTag, int secondTypeTag) { return ((TypeTags.isIntegerTypeTag(firstTypeTag) || firstTypeTag == TypeTags.BYTE) && (TypeTags.isIntegerTypeTag(secondTypeTag) || secondTypeTag == TypeTags.BYTE)); } boolean validIntegerTypeExists(BType bType) { BType type = getReferredType(bType); if (type.isNullable() && type.tag != TypeTags.NIL) { type = getSafeType(type, true, false); } if (TypeTags.isIntegerTypeTag(type.tag)) { return true; } switch (type.tag) { case TypeTags.BYTE: return true; case TypeTags.UNION: LinkedHashSet<BType> memberTypes = ((BUnionType) type).getMemberTypes(); for (BType memberType : memberTypes) { memberType = getReferredType(memberType); if (!validIntegerTypeExists(memberType)) { return false; } } return true; case TypeTags.FINITE: Set<BLangExpression> valueSpace = ((BFiniteType) type).getValueSpace(); for (BLangExpression expr : valueSpace) { if (!validIntegerTypeExists(expr.getBType())) { return false; } } return true; case TypeTags.INTERSECTION: return validIntegerTypeExists(((BIntersectionType) type).effectiveType); default: return false; } } boolean validStringOrXmlTypeExists(BType bType) { BType type = getReferredType(bType); if (TypeTags.isStringTypeTag(type.tag)) { return true; } switch (type.tag) { case TypeTags.XML: case TypeTags.XML_TEXT: return true; case TypeTags.UNION: BUnionType unionType = (BUnionType) type; Set<BType> memberTypes = unionType.getMemberTypes(); BType firstTypeInUnion = getReferredType(memberTypes.iterator().next()); if (firstTypeInUnion.tag == TypeTags.FINITE) { Set<BLangExpression> valSpace = ((BFiniteType) firstTypeInUnion).getValueSpace(); BType baseExprType = valSpace.iterator().next().getBType(); for (BType memType : memberTypes) { memType = getReferredType(memType); if (memType.tag == TypeTags.FINITE) { if (!checkValueSpaceHasSameType((BFiniteType) memType, baseExprType)) { return false; } } if (!checkValidStringOrXmlTypesInUnion(memType, firstTypeInUnion.tag)) { return false; } } } else { for (BType memType : memberTypes) { memType = getReferredType(memType); if (!checkValidStringOrXmlTypesInUnion(memType, firstTypeInUnion.tag)) { return false; } } } return true; case TypeTags.FINITE: Set<BLangExpression> valSpace = ((BFiniteType) type).getValueSpace(); BType baseExprType = valSpace.iterator().next().getBType(); for (BLangExpression expr : valSpace) { if (!checkValueSpaceHasSameType((BFiniteType) type, baseExprType)) { return false; } if (!validStringOrXmlTypeExists(expr.getBType())) { return false; } } return true; default: return false; } } private boolean checkValidStringOrXmlTypesInUnion(BType memType, int firstTypeTag) { if (memType.tag != firstTypeTag && !checkTypesBelongToStringOrXml(memType.tag, firstTypeTag)) { return false; } return validStringOrXmlTypeExists(memType); } private boolean checkTypesBelongToStringOrXml(int firstTypeTag, int secondTypeTag) { return (TypeTags.isStringTypeTag(firstTypeTag) && TypeTags.isStringTypeTag(secondTypeTag)) || (TypeTags.isXMLTypeTag(firstTypeTag) && TypeTags.isXMLTypeTag(secondTypeTag)); } public boolean checkTypeContainString(BType type) { if 
(TypeTags.isStringTypeTag(type.tag)) { return true; } switch (type.tag) { case TypeTags.UNION: for (BType memType : ((BUnionType) type).getMemberTypes()) { if (!checkTypeContainString(memType)) { return false; } } return true; case TypeTags.FINITE: Set<BLangExpression> valSpace = ((BFiniteType) type).getValueSpace(); for (BLangExpression expr : valSpace) { if (!checkTypeContainString(expr.getBType())) { return false; } } return true; case TypeTags.TYPEREFDESC: return checkTypeContainString(getReferredType(type)); default: return false; } } /** * Retrieves member types of the specified type, expanding maps/arrays of/constrained by union types to individual * maps/arrays. * * e.g., (string|int)[] would result in three entries: string[], int[], and (string|int)[] * * @param bType the type for which member types need to be identified * @return a set containing all the retrieved member types */ public Set<BType> expandAndGetMemberTypesRecursive(BType bType) { HashSet<BType> visited = new HashSet<>(); return expandAndGetMemberTypesRecursiveHelper(bType, visited); } private Set<BType> expandAndGetMemberTypesRecursiveHelper(BType bType, HashSet<BType> visited) { Set<BType> memberTypes = new LinkedHashSet<>(); switch (bType.tag) { case TypeTags.BYTE: case TypeTags.INT: memberTypes.add(symTable.intType); memberTypes.add(symTable.byteType); break; case TypeTags.FINITE: BFiniteType expType = (BFiniteType) bType; expType.getValueSpace().forEach(value -> { memberTypes.add(value.getBType()); }); break; case TypeTags.UNION: BUnionType unionType = (BUnionType) bType; if (!visited.add(unionType)) { return memberTypes; } unionType.getMemberTypes().forEach(member -> { memberTypes.addAll(expandAndGetMemberTypesRecursiveHelper(member, visited)); }); break; case TypeTags.ARRAY: BType arrayElementType = ((BArrayType) bType).getElementType(); if (((BArrayType) bType).getSize() != -1) { memberTypes.add(new BArrayType(arrayElementType)); } if (arrayElementType.tag == TypeTags.UNION) { Set<BType> elementUnionTypes = expandAndGetMemberTypesRecursiveHelper(arrayElementType, visited); elementUnionTypes.forEach(elementUnionType -> { memberTypes.add(new BArrayType(elementUnionType)); }); } memberTypes.add(bType); break; case TypeTags.MAP: BType mapConstraintType = ((BMapType) bType).getConstraint(); if (mapConstraintType.tag == TypeTags.UNION) { Set<BType> constraintUnionTypes = expandAndGetMemberTypesRecursiveHelper(mapConstraintType, visited); constraintUnionTypes.forEach(constraintUnionType -> { memberTypes.add(new BMapType(TypeTags.MAP, constraintUnionType, symTable.mapType.tsymbol)); }); } memberTypes.add(bType); break; case TypeTags.INTERSECTION: memberTypes.addAll(expandAndGetMemberTypesRecursive(((BIntersectionType) bType).effectiveType)); break; case TypeTags.TYPEREFDESC: return expandAndGetMemberTypesRecursiveHelper(getReferredType(bType), visited); default: memberTypes.add(bType); } return memberTypes; } private boolean tupleIntersectionExists(BTupleType lhsType, BTupleType rhsType) { if (lhsType.getTupleTypes().size() != rhsType.getTupleTypes().size()) { return false; } List<BType> lhsMemberTypes = lhsType.getTupleTypes(); List<BType> rhsMemberTypes = rhsType.getTupleTypes(); for (int i = 0; i < lhsType.getTupleTypes().size(); i++) { if (!equalityIntersectionExists(expandAndGetMemberTypesRecursive(lhsMemberTypes.get(i)), expandAndGetMemberTypesRecursive(rhsMemberTypes.get(i)))) { return false; } } return true; } private boolean equalityIntersectionExistsForComplexTypes(Set<BType> lhsTypes, Set<BType> rhsTypes) { 
for (BType lhsMemberType : lhsTypes) { switch (lhsMemberType.tag) { case TypeTags.INT: case TypeTags.STRING: case TypeTags.FLOAT: case TypeTags.DECIMAL: case TypeTags.BOOLEAN: case TypeTags.NIL: if (rhsTypes.stream().anyMatch(rhsMemberType -> rhsMemberType.tag == TypeTags.JSON)) { return true; } break; case TypeTags.JSON: if (jsonEqualityIntersectionExists(rhsTypes)) { return true; } break; case TypeTags.TUPLE: if (rhsTypes.stream().anyMatch( rhsMemberType -> rhsMemberType.tag == TypeTags.TUPLE && tupleIntersectionExists((BTupleType) lhsMemberType, (BTupleType) rhsMemberType))) { return true; } if (rhsTypes.stream().anyMatch( rhsMemberType -> rhsMemberType.tag == TypeTags.ARRAY && arrayTupleEqualityIntersectionExists((BArrayType) rhsMemberType, (BTupleType) lhsMemberType))) { return true; } break; case TypeTags.ARRAY: if (rhsTypes.stream().anyMatch( rhsMemberType -> rhsMemberType.tag == TypeTags.ARRAY && equalityIntersectionExists( expandAndGetMemberTypesRecursive(((BArrayType) lhsMemberType).eType), expandAndGetMemberTypesRecursive(((BArrayType) rhsMemberType).eType)))) { return true; } if (rhsTypes.stream().anyMatch( rhsMemberType -> rhsMemberType.tag == TypeTags.TUPLE && arrayTupleEqualityIntersectionExists((BArrayType) lhsMemberType, (BTupleType) rhsMemberType))) { return true; } break; case TypeTags.MAP: if (rhsTypes.stream().anyMatch( rhsMemberType -> rhsMemberType.tag == TypeTags.MAP && equalityIntersectionExists( expandAndGetMemberTypesRecursive(((BMapType) lhsMemberType).constraint), expandAndGetMemberTypesRecursive(((BMapType) rhsMemberType).constraint)))) { return true; } if (!isAssignable(((BMapType) lhsMemberType).constraint, symTable.errorType) && rhsTypes.stream().anyMatch(rhsMemberType -> rhsMemberType.tag == TypeTags.JSON)) { return true; } if (rhsTypes.stream().anyMatch( rhsMemberType -> rhsMemberType.tag == TypeTags.RECORD && mapRecordEqualityIntersectionExists((BMapType) lhsMemberType, (BRecordType) rhsMemberType))) { return true; } break; case TypeTags.OBJECT: case TypeTags.RECORD: if (rhsTypes.stream().anyMatch( rhsMemberType -> checkStructEquivalency(rhsMemberType, lhsMemberType) || checkStructEquivalency(lhsMemberType, rhsMemberType))) { return true; } if (rhsTypes.stream().anyMatch( rhsMemberType -> rhsMemberType.tag == TypeTags.RECORD && recordEqualityIntersectionExists((BRecordType) lhsMemberType, (BRecordType) rhsMemberType))) { return true; } if (rhsTypes.stream().anyMatch(rhsMemberType -> rhsMemberType.tag == TypeTags.JSON) && jsonEqualityIntersectionExists(expandAndGetMemberTypesRecursive(lhsMemberType))) { return true; } if (rhsTypes.stream().anyMatch( rhsMemberType -> rhsMemberType.tag == TypeTags.MAP && mapRecordEqualityIntersectionExists((BMapType) rhsMemberType, (BRecordType) lhsMemberType))) { return true; } break; } } return false; } private boolean arrayTupleEqualityIntersectionExists(BArrayType arrayType, BTupleType tupleType) { Set<BType> elementTypes = expandAndGetMemberTypesRecursive(arrayType.eType); return tupleType.tupleTypes.stream() .allMatch(tupleMemType -> equalityIntersectionExists(elementTypes, expandAndGetMemberTypesRecursive(tupleMemType))); } private boolean recordEqualityIntersectionExists(BRecordType lhsType, BRecordType rhsType) { Map<String, BField> lhsFields = lhsType.fields; Map<String, BField> rhsFields = rhsType.fields; List<Name> matchedFieldNames = new ArrayList<>(); for (BField lhsField : lhsFields.values()) { if (rhsFields.containsKey(lhsField.name.value)) { if 
(!equalityIntersectionExists(expandAndGetMemberTypesRecursive(lhsField.type), expandAndGetMemberTypesRecursive( rhsFields.get(lhsField.name.value).type))) { return false; } matchedFieldNames.add(lhsField.getName()); } else { if (Symbols.isFlagOn(lhsField.symbol.flags, Flags.OPTIONAL)) { break; } if (rhsType.sealed) { return false; } if (!equalityIntersectionExists(expandAndGetMemberTypesRecursive(lhsField.type), expandAndGetMemberTypesRecursive(rhsType.restFieldType))) { return false; } } } for (BField rhsField : rhsFields.values()) { if (matchedFieldNames.contains(rhsField.getName())) { continue; } if (!Symbols.isFlagOn(rhsField.symbol.flags, Flags.OPTIONAL)) { if (lhsType.sealed) { return false; } if (!equalityIntersectionExists(expandAndGetMemberTypesRecursive(rhsField.type), expandAndGetMemberTypesRecursive(lhsType.restFieldType))) { return false; } } } return true; } private boolean mapRecordEqualityIntersectionExists(BMapType mapType, BRecordType recordType) { Set<BType> mapConstrTypes = expandAndGetMemberTypesRecursive(mapType.getConstraint()); for (BField field : recordType.fields.values()) { if (!Symbols.isFlagOn(field.symbol.flags, Flags.OPTIONAL) && !equalityIntersectionExists(mapConstrTypes, expandAndGetMemberTypesRecursive(field.type))) { return false; } } return true; } private boolean jsonEqualityIntersectionExists(Set<BType> typeSet) { for (BType type : typeSet) { switch (type.tag) { case TypeTags.MAP: if (!isAssignable(((BMapType) type).constraint, symTable.errorType)) { return true; } break; case TypeTags.RECORD: BRecordType recordType = (BRecordType) type; if (recordType.fields.values().stream() .allMatch(field -> Symbols.isFlagOn(field.symbol.flags, Flags.OPTIONAL) || !isAssignable(field.type, symTable.errorType))) { return true; } break; default: if (isAssignable(type, symTable.jsonType)) { return true; } } } return false; } public BType getRemainingMatchExprType(BType originalType, BType typeToRemove) { switch (originalType.tag) { case TypeTags.UNION: return getRemainingType((BUnionType) originalType, getAllTypes(typeToRemove, true)); case TypeTags.FINITE: return getRemainingType((BFiniteType) originalType, getAllTypes(typeToRemove, true)); case TypeTags.TUPLE: return getRemainingType((BTupleType) originalType, typeToRemove); default: return originalType; } } private BType getRemainingType(BTupleType originalType, BType typeToRemove) { switch (typeToRemove.tag) { case TypeTags.TUPLE: return getRemainingType(originalType, (BTupleType) typeToRemove); case TypeTags.ARRAY: return getRemainingType(originalType, (BArrayType) typeToRemove); default: return originalType; } } private BType getRemainingType(BTupleType originalType, BTupleType typeToRemove) { if (originalType.restType != null) { return originalType; } List<BType> originalTupleTypes = new ArrayList<>(originalType.tupleTypes); List<BType> typesToRemove = new ArrayList<>(typeToRemove.tupleTypes); if (originalTupleTypes.size() < typesToRemove.size()) { return originalType; } List<BType> tupleTypes = new ArrayList<>(); for (int i = 0; i < originalTupleTypes.size(); i++) { tupleTypes.add(getRemainingMatchExprType(originalTupleTypes.get(i), typesToRemove.get(i))); } if (typeToRemove.restType == null) { return new BTupleType(tupleTypes); } if (originalTupleTypes.size() == typesToRemove.size()) { return originalType; } for (int i = typesToRemove.size(); i < originalTupleTypes.size(); i++) { tupleTypes.add(getRemainingMatchExprType(originalTupleTypes.get(i), typeToRemove.restType)); } return new BTupleType(tupleTypes); } 
private BType getRemainingType(BTupleType originalType, BArrayType typeToRemove) { BType eType = typeToRemove.eType; List<BType> tupleTypes = new ArrayList<>(); for (BType tupleType : originalType.tupleTypes) { tupleTypes.add(getRemainingMatchExprType(tupleType, eType)); } BTupleType remainingType = new BTupleType(tupleTypes); if (originalType.restType != null) { remainingType.restType = getRemainingMatchExprType(originalType.restType, eType); } return remainingType; } public BType getRemainingType(BType originalType, BType typeToRemove) { switch (originalType.tag) { case TypeTags.UNION: return getRemainingType((BUnionType) originalType, getAllTypes(typeToRemove, true)); case TypeTags.FINITE: return getRemainingType((BFiniteType) originalType, getAllTypes(typeToRemove, true)); case TypeTags.READONLY: return getRemainingType((BReadonlyType) originalType, typeToRemove); case TypeTags.TYPEREFDESC: BType refType = getReferredType(originalType); if (refType.tag != TypeTags.UNION && refType.tag != TypeTags.FINITE) { return originalType; } return getRemainingType(refType, typeToRemove); default: return originalType; } } private BType getRemainingType(BReadonlyType originalType, BType removeType) { if (removeType.tag == TypeTags.ERROR) { return symTable.anyAndReadonly; } return originalType; } public BType getTypeIntersection(IntersectionContext intersectionContext, BType lhsType, BType rhsType, SymbolEnv env) { return getTypeIntersection(intersectionContext, lhsType, rhsType, env, new LinkedHashSet<>()); } private BType getTypeIntersection(IntersectionContext intersectionContext, BType lhsType, BType rhsType, SymbolEnv env, LinkedHashSet<BType> visitedTypes) { List<BType> rhsTypeComponents = getAllTypes(rhsType, false); LinkedHashSet<BType> intersection = new LinkedHashSet<>(rhsTypeComponents.size()); for (BType rhsComponent : rhsTypeComponents) { BType it = getIntersection(intersectionContext, lhsType, env, rhsComponent, new LinkedHashSet<>(visitedTypes)); if (it != null) { intersection.add(it); } } if (intersection.isEmpty()) { if (lhsType.tag == TypeTags.NULL_SET) { return lhsType; } return symTable.semanticError; } if (intersection.size() == 1) { return intersection.toArray(new BType[0])[0]; } else { return BUnionType.create(null, intersection); } } private BType getIntersection(IntersectionContext intersectionContext, BType lhsType, SymbolEnv env, BType type, LinkedHashSet<BType> visitedTypes) { lhsType = getEffectiveTypeForIntersection(lhsType); type = getEffectiveTypeForIntersection(type); if (intersectionContext.preferNonGenerativeIntersection) { if (isAssignable(type, lhsType)) { return type; } else if (isAssignable(lhsType, type)) { return lhsType; } } type = getReferredType(type); lhsType = getReferredType(lhsType); if (type.tag == TypeTags.ERROR && lhsType.tag == TypeTags.ERROR) { BType intersectionType = getIntersectionForErrorTypes(intersectionContext, lhsType, type, env, visitedTypes); if (intersectionType != symTable.semanticError) { return intersectionType; } } else if (type.tag == TypeTags.RECORD && lhsType.tag == TypeTags.RECORD) { BType intersectionType = createRecordIntersection(intersectionContext, (BRecordType) lhsType, (BRecordType) type, env, visitedTypes); if (intersectionType != symTable.semanticError) { return intersectionType; } } else if (type.tag == TypeTags.MAP && lhsType.tag == TypeTags.RECORD) { BType intersectionType = createRecordIntersection(intersectionContext, (BRecordType) lhsType, getEquivalentRecordType((BMapType) type), env, visitedTypes); if 
(intersectionType != symTable.semanticError) { return intersectionType; } } else if (type.tag == TypeTags.RECORD && lhsType.tag == TypeTags.MAP) { BType intersectionType = createRecordIntersection(intersectionContext, getEquivalentRecordType((BMapType) lhsType), (BRecordType) type, env, visitedTypes); if (intersectionType != symTable.semanticError) { return intersectionType; } } else if (!intersectionContext.preferNonGenerativeIntersection && isAssignable(type, lhsType)) { return type; } else if (!intersectionContext.preferNonGenerativeIntersection && isAssignable(lhsType, type)) { return lhsType; } else if (lhsType.tag == TypeTags.FINITE) { BType intersectionType = getTypeForFiniteTypeValuesAssignableToType((BFiniteType) lhsType, type); if (intersectionType != symTable.semanticError) { return intersectionType; } } else if (type.tag == TypeTags.FINITE) { BType intersectionType = getTypeForFiniteTypeValuesAssignableToType((BFiniteType) type, lhsType); if (intersectionType != symTable.semanticError) { return intersectionType; } } else if (lhsType.tag == TypeTags.UNION) { BType intersectionType = getTypeForUnionTypeMembersAssignableToType((BUnionType) lhsType, type, env, intersectionContext, visitedTypes); if (intersectionType != symTable.semanticError) { return intersectionType; } } else if (type.tag == TypeTags.UNION) { BType intersectionType = getTypeForUnionTypeMembersAssignableToType((BUnionType) type, lhsType, env, intersectionContext, visitedTypes); if (intersectionType != symTable.semanticError) { return intersectionType; } } else if (type.tag == TypeTags.MAP && lhsType.tag == TypeTags.MAP) { BType intersectionConstraintTypeType = getIntersection(intersectionContext, ((BMapType) lhsType).constraint, env, ((BMapType) type).constraint, visitedTypes); if (intersectionConstraintTypeType == null || intersectionConstraintTypeType == symTable.semanticError) { return null; } return new BMapType(TypeTags.MAP, intersectionConstraintTypeType, null); } else if (type.tag == TypeTags.ARRAY && lhsType.tag == TypeTags.TUPLE) { BType intersectionType = createArrayAndTupleIntersection(intersectionContext, (BArrayType) type, (BTupleType) lhsType, env, visitedTypes); if (intersectionType != symTable.semanticError) { return intersectionType; } } else if (type.tag == TypeTags.TUPLE && lhsType.tag == TypeTags.ARRAY) { BType intersectionType = createArrayAndTupleIntersection(intersectionContext, (BArrayType) lhsType, (BTupleType) type, env, visitedTypes); if (intersectionType != symTable.semanticError) { return intersectionType; } } else if (type.tag == TypeTags.TUPLE && lhsType.tag == TypeTags.TUPLE) { BType intersectionType = createTupleAndTupleIntersection(intersectionContext, (BTupleType) lhsType, (BTupleType) type, env, visitedTypes); if (intersectionType != symTable.semanticError) { return intersectionType; } } else if (isAnydataOrJson(type) && lhsType.tag == TypeTags.RECORD) { BType intersectionType = createRecordIntersection(intersectionContext, (BRecordType) lhsType, getEquivalentRecordType(getMapTypeForAnydataOrJson(type)), env, visitedTypes); if (intersectionType != symTable.semanticError) { return intersectionType; } } else if (type.tag == TypeTags.RECORD && isAnydataOrJson(lhsType)) { BType intersectionType = createRecordIntersection(intersectionContext, getEquivalentRecordType(getMapTypeForAnydataOrJson(lhsType)), (BRecordType) type, env, visitedTypes); if (intersectionType != symTable.semanticError) { return intersectionType; } } else if (isAnydataOrJson(type) && lhsType.tag == TypeTags.MAP) { 
return getIntersection(intersectionContext, lhsType, env, getMapTypeForAnydataOrJson(type), visitedTypes); } else if (type.tag == TypeTags.MAP && isAnydataOrJson(lhsType)) { return getIntersection(intersectionContext, getMapTypeForAnydataOrJson(lhsType), env, type, visitedTypes); } else if (isAnydataOrJson(type) && lhsType.tag == TypeTags.TUPLE) { BType intersectionType = createArrayAndTupleIntersection(intersectionContext, getArrayTypeForAnydataOrJson(type), (BTupleType) lhsType, env, visitedTypes); if (intersectionType != symTable.semanticError) { return intersectionType; } } else if (type.tag == TypeTags.TUPLE && isAnydataOrJson(lhsType)) { BType intersectionType = createArrayAndTupleIntersection(intersectionContext, getArrayTypeForAnydataOrJson(lhsType), (BTupleType) type, env, visitedTypes); if (intersectionType != symTable.semanticError) { return intersectionType; } } else if (isAnydataOrJson(type) && lhsType.tag == TypeTags.ARRAY) { BType elementIntersection = getIntersection(intersectionContext, ((BArrayType) lhsType).eType, env, type, visitedTypes); if (elementIntersection == null) { return elementIntersection; } return new BArrayType(elementIntersection); } else if (type.tag == TypeTags.ARRAY && isAnydataOrJson(lhsType)) { BType elementIntersection = getIntersection(intersectionContext, lhsType, env, ((BArrayType) type).eType, visitedTypes); if (elementIntersection == null) { return elementIntersection; } return new BArrayType(elementIntersection); } else if (type.tag == TypeTags.NULL_SET) { return type; } return null; } private BType getEffectiveTypeForIntersection(BType bType) { BType type = getReferredType(bType); if (type.tag != TypeTags.INTERSECTION) { return bType; } BType effectiveType = ((BIntersectionType) type).effectiveType; return effectiveType.tag == TypeTags.UNION && ((BUnionType) effectiveType).isCyclic ? type : effectiveType; } private boolean isAnydataOrJson(BType type) { switch (type.tag) { case TypeTags.ANYDATA: case TypeTags.JSON: return true; } return false; } private BMapType getMapTypeForAnydataOrJson(BType type) { BMapType mapType = type.tag == TypeTags.ANYDATA ? symTable.mapAnydataType : symTable.mapJsonType; if (isImmutable(type)) { return (BMapType) ImmutableTypeCloner.getEffectiveImmutableType(null, this, mapType, env, symTable, anonymousModelHelper, names); } return mapType; } private BArrayType getArrayTypeForAnydataOrJson(BType type) { BArrayType arrayType = type.tag == TypeTags.ANYDATA ? 
symTable.arrayAnydataType : symTable.arrayJsonType; if (isImmutable(type)) { return (BArrayType) ImmutableTypeCloner.getEffectiveImmutableType(null, this, arrayType, env, symTable, anonymousModelHelper, names); } return arrayType; } private BType createArrayAndTupleIntersection(IntersectionContext intersectionContext, BArrayType arrayType, BTupleType tupleType, SymbolEnv env, LinkedHashSet<BType> visitedTypes) { if (!visitedTypes.add(tupleType)) { return tupleType; } List<BType> tupleTypes = tupleType.tupleTypes; if (arrayType.state == BArrayState.CLOSED && tupleTypes.size() != arrayType.size) { if (tupleTypes.size() > arrayType.size) { return symTable.semanticError; } if (tupleType.restType == null) { return symTable.semanticError; } } List<BType> tupleMemberTypes = new ArrayList<>(tupleTypes.size()); BType eType = arrayType.eType; for (BType memberType : tupleTypes) { BType intersectionType = getTypeIntersection(intersectionContext, memberType, eType, env, visitedTypes); if (intersectionType == symTable.semanticError) { return symTable.semanticError; } tupleMemberTypes.add(intersectionType); } if (tupleType.restType == null) { return new BTupleType(null, tupleMemberTypes); } BType restIntersectionType = getTypeIntersection(intersectionContext, tupleType.restType, eType, env, visitedTypes); if (restIntersectionType == symTable.semanticError) { return new BTupleType(null, tupleMemberTypes); } return new BTupleType(null, tupleMemberTypes, restIntersectionType, 0); } private BType createTupleAndTupleIntersection(IntersectionContext intersectionContext, BTupleType lhsTupleType, BTupleType tupleType, SymbolEnv env, LinkedHashSet<BType> visitedTypes) { if (lhsTupleType.restType == null && tupleType.restType != null) { return symTable.semanticError; } if (lhsTupleType.restType == null && lhsTupleType.tupleTypes.size() != tupleType.tupleTypes.size()) { return symTable.semanticError; } List<BType> lhsTupleTypes = lhsTupleType.tupleTypes; List<BType> tupleTypes = tupleType.tupleTypes; if (lhsTupleTypes.size() > tupleTypes.size()) { return symTable.semanticError; } List<BType> tupleMemberTypes = new ArrayList<>(tupleTypes.size()); for (int i = 0; i < tupleTypes.size(); i++) { BType lhsType = (lhsTupleTypes.size() > i) ? 
lhsTupleTypes.get(i) : lhsTupleType.restType; BType intersectionType = getTypeIntersection(intersectionContext, tupleTypes.get(i), lhsType, env, visitedTypes); if (intersectionType == symTable.semanticError) { return symTable.semanticError; } tupleMemberTypes.add(intersectionType); } if (lhsTupleType.restType != null && tupleType.restType != null) { BType restIntersectionType = getTypeIntersection(intersectionContext, tupleType.restType, lhsTupleType.restType, env, visitedTypes); if (restIntersectionType == symTable.semanticError) { return new BTupleType(null, tupleMemberTypes); } return new BTupleType(null, tupleMemberTypes, restIntersectionType, 0); } return new BTupleType(null, tupleMemberTypes); } private BType getIntersectionForErrorTypes(IntersectionContext intersectionContext, BType lhsType, BType rhsType, SymbolEnv env, LinkedHashSet<BType> visitedTypes) { BType detailIntersectionType = getTypeIntersection(intersectionContext, ((BErrorType) lhsType).detailType, ((BErrorType) rhsType).detailType, env, visitedTypes); if (detailIntersectionType == symTable.semanticError) { return symTable.semanticError; } BErrorType intersectionErrorType = createErrorType(lhsType, rhsType, detailIntersectionType, env); if (intersectionContext.createTypeDefs) { BTypeSymbol errorTSymbol = intersectionErrorType.tsymbol; BLangErrorType bLangErrorType = TypeDefBuilderHelper.createBLangErrorType(symTable.builtinPos, intersectionErrorType, env, anonymousModelHelper); BLangTypeDefinition errorTypeDefinition = TypeDefBuilderHelper.addTypeDefinition( intersectionErrorType, errorTSymbol, bLangErrorType, env); errorTypeDefinition.pos = symTable.builtinPos; } return intersectionErrorType; } private BType createRecordIntersection(IntersectionContext intersectionContext, BRecordType recordTypeOne, BRecordType recordTypeTwo, SymbolEnv env, LinkedHashSet<BType> visitedTypes) { LinkedHashMap<String, BField> recordOneFields = recordTypeOne.fields; LinkedHashMap<String, BField> recordTwoFields = recordTypeTwo.fields; Set<String> recordOneKeys = recordOneFields.keySet(); Set<String> recordTwoKeys = recordTwoFields.keySet(); boolean isRecordOneClosed = recordTypeOne.sealed; boolean isRecordTwoClosed = recordTypeTwo.sealed; BType effectiveRecordOneRestFieldType = getConstraint(recordTypeOne); BType effectiveRecordTwoRestFieldType = getConstraint(recordTypeTwo); BRecordType newType = createAnonymousRecord(env); BTypeSymbol newTypeSymbol = newType.tsymbol; Set<String> addedKeys = new HashSet<>(); LinkedHashMap<String, BField> newTypeFields = newType.fields; if (!populateFields(intersectionContext.switchLeft(), recordTypeOne, env, recordOneFields, recordTwoFields, recordOneKeys, recordTwoKeys, isRecordTwoClosed, effectiveRecordTwoRestFieldType, newTypeSymbol, addedKeys, newTypeFields, visitedTypes)) { return symTable.semanticError; } if (!populateFields(intersectionContext.switchRight(), recordTypeTwo, env, recordTwoFields, recordOneFields, recordTwoKeys, recordOneKeys, isRecordOneClosed, effectiveRecordOneRestFieldType, newTypeSymbol, addedKeys, newTypeFields, visitedTypes)) { return symTable.semanticError; } BType restFieldType = getTypeIntersection(intersectionContext, effectiveRecordOneRestFieldType, effectiveRecordTwoRestFieldType, env, visitedTypes); if (setRestType(newType, restFieldType) == symTable.semanticError) { return symTable.semanticError; } if ((newType.sealed || newType.restFieldType == symTable.neverType) && (newTypeFields.isEmpty() || allReadOnlyFields(newTypeFields))) { newType.flags |= Flags.READONLY; 
newTypeSymbol.flags |= Flags.READONLY; } if (intersectionContext.createTypeDefs) { BLangRecordTypeNode recordTypeNode = TypeDefBuilderHelper.createRecordTypeNode( newType, env.enclPkg.packageID, symTable, symTable.builtinPos); BLangTypeDefinition recordTypeDef = TypeDefBuilderHelper.addTypeDefinition( newType, newType.tsymbol, recordTypeNode, env); env.enclPkg.symbol.scope.define(newType.tsymbol.name, newType.tsymbol); recordTypeDef.pos = symTable.builtinPos; } return newType; } private boolean populateFields(IntersectionContext intersectionContext, BRecordType lhsRecord, SymbolEnv env, LinkedHashMap<String, BField> lhsRecordFields, LinkedHashMap<String, BField> rhsRecordFields, Set<String> lhsRecordKeys, Set<String> rhsRecordKeys, boolean isRhsRecordClosed, BType effectiveRhsRecordRestFieldType, BTypeSymbol newTypeSymbol, Set<String> addedKeys, LinkedHashMap<String, BField> newTypeFields, LinkedHashSet<BType> visitedTypes) { for (String key : lhsRecordKeys) { BField lhsRecordField = lhsRecordFields.get(key); if (!validateRecordFieldDefaultValueForIntersection(intersectionContext, lhsRecordField, lhsRecord)) { return false; } if (!addedKeys.add(key)) { continue; } BType intersectionFieldType; long intersectionFlags = lhsRecordField.symbol.flags; BType recordOneFieldType = lhsRecordField.type; if (!rhsRecordKeys.contains(key)) { if (isRhsRecordClosed) { if (!Symbols.isFlagOn(lhsRecordField.symbol.flags, Flags.OPTIONAL)) { return false; } continue; } if (isNeverTypeOrStructureTypeWithARequiredNeverMember(effectiveRhsRecordRestFieldType) && !isNeverTypeOrStructureTypeWithARequiredNeverMember(recordOneFieldType)) { return false; } intersectionFieldType = getIntersection(intersectionContext, recordOneFieldType, env, effectiveRhsRecordRestFieldType, visitedTypes); if (intersectionFieldType == null || intersectionFieldType == symTable.semanticError) { if (Symbols.isFlagOn(lhsRecordField.symbol.flags, Flags.OPTIONAL)) { continue; } return false; } } else { BField rhsRecordField = rhsRecordFields.get(key); intersectionFieldType = getIntersection(intersectionContext, recordOneFieldType, env, rhsRecordField.type, visitedTypes); long rhsFieldFlags = rhsRecordField.symbol.flags; if (Symbols.isFlagOn(rhsFieldFlags, Flags.READONLY)) { intersectionFlags |= Flags.READONLY; } if (!Symbols.isFlagOn(rhsFieldFlags, Flags.OPTIONAL) && Symbols.isFlagOn(intersectionFlags, Flags.OPTIONAL)) { intersectionFlags &= ~Flags.OPTIONAL; } if (Symbols.isFlagOn(rhsFieldFlags, Flags.REQUIRED) && !Symbols.isFlagOn(intersectionFlags, Flags.REQUIRED)) { intersectionFlags |= Flags.REQUIRED; } } if (intersectionFieldType == null || intersectionFieldType == symTable.semanticError) { return false; } org.wso2.ballerinalang.compiler.util.Name name = lhsRecordField.name; BVarSymbol recordFieldSymbol; if (intersectionFieldType.tag == TypeTags.INVOKABLE && intersectionFieldType.tsymbol != null) { recordFieldSymbol = new BInvokableSymbol(lhsRecordField.symbol.tag, intersectionFlags, name, env.enclPkg.packageID, intersectionFieldType, newTypeSymbol, lhsRecordField.pos, SOURCE); BInvokableTypeSymbol tsymbol = (BInvokableTypeSymbol) intersectionFieldType.tsymbol; BInvokableSymbol invokableSymbol = (BInvokableSymbol) recordFieldSymbol; invokableSymbol.params = tsymbol == null ? 
null : new ArrayList<>(tsymbol.params); invokableSymbol.restParam = tsymbol.restParam; invokableSymbol.retType = tsymbol.returnType; invokableSymbol.flags = tsymbol.flags; } else { recordFieldSymbol = new BVarSymbol(intersectionFlags, name, env.enclPkg.packageID, intersectionFieldType, newTypeSymbol, lhsRecordField.pos, SOURCE); } newTypeFields.put(key, new BField(name, null, recordFieldSymbol)); newTypeSymbol.scope.define(name, recordFieldSymbol); } return true; } private boolean allReadOnlyFields(LinkedHashMap<String, BField> fields) { for (BField field : fields.values()) { if (!Symbols.isFlagOn(field.symbol.flags, Flags.READONLY)) { return false; } } return true; } private BType setRestType(BRecordType recordType, BType restType) { if (restType == symTable.semanticError) { recordType.restFieldType = symTable.semanticError; return symTable.semanticError; } if (restType == symTable.neverType) { recordType.sealed = true; recordType.restFieldType = symTable.noType; return symTable.noType; } recordType.restFieldType = restType; return restType; } private BType getConstraint(BRecordType recordType) { if (recordType.sealed) { return symTable.neverType; } return recordType.restFieldType; } private BRecordType createAnonymousRecord(SymbolEnv env) { EnumSet<Flag> flags = EnumSet.of(Flag.PUBLIC, Flag.ANONYMOUS); BRecordTypeSymbol recordSymbol = Symbols.createRecordSymbol(Flags.asMask(flags), Names.EMPTY, env.enclPkg.packageID, null, env.scope.owner, null, VIRTUAL); recordSymbol.name = names.fromString( anonymousModelHelper.getNextAnonymousTypeKey(env.enclPkg.packageID)); BInvokableType bInvokableType = new BInvokableType(new ArrayList<>(), symTable.nilType, null); BInvokableSymbol initFuncSymbol = Symbols.createFunctionSymbol( Flags.PUBLIC, Names.EMPTY, Names.EMPTY, env.enclPkg.symbol.pkgID, bInvokableType, env.scope.owner, false, symTable.builtinPos, VIRTUAL); initFuncSymbol.retType = symTable.nilType; recordSymbol.initializerFunc = new BAttachedFunction(Names.INIT_FUNCTION_SUFFIX, initFuncSymbol, bInvokableType, symTable.builtinPos); recordSymbol.scope = new Scope(recordSymbol); BRecordType recordType = new BRecordType(recordSymbol); recordType.tsymbol = recordSymbol; recordSymbol.type = recordType; return recordType; } private BRecordType getEquivalentRecordType(BMapType mapType) { BRecordType equivalentRecordType = new BRecordType(null); equivalentRecordType.sealed = false; equivalentRecordType.restFieldType = mapType.constraint; return equivalentRecordType; } private BErrorType createErrorType(BType lhsType, BType rhsType, BType detailType, SymbolEnv env) { BErrorType lhsErrorType = (BErrorType) lhsType; BErrorType rhsErrorType = (BErrorType) rhsType; BErrorType errorType = createErrorType(detailType, lhsType.flags, env); errorType.tsymbol.flags |= rhsType.flags; errorType.typeIdSet = BTypeIdSet.getIntersection(lhsErrorType.typeIdSet, rhsErrorType.typeIdSet); return errorType; } public BErrorType createErrorType(BType detailType, long flags, SymbolEnv env) { String name = anonymousModelHelper.getNextAnonymousIntersectionErrorTypeName(env.enclPkg.packageID); BErrorTypeSymbol errorTypeSymbol = Symbols.createErrorSymbol(flags | Flags.ANONYMOUS, names.fromString(name), env.enclPkg.symbol.pkgID, null, env.scope.owner, symTable.builtinPos, VIRTUAL); errorTypeSymbol.scope = new Scope(errorTypeSymbol); BErrorType errorType = new BErrorType(errorTypeSymbol, detailType); errorType.flags |= errorTypeSymbol.flags; errorTypeSymbol.type = errorType; errorType.typeIdSet = BTypeIdSet.emptySet(); return 
errorType; } private boolean populateRecordFields(IntersectionContext diagnosticContext, BRecordType newType, BType originalType, SymbolEnv env, BType constraint) { BTypeSymbol intersectionRecordSymbol = newType.tsymbol; if (originalType.getKind() != TypeKind.RECORD) { return true; } BRecordType originalRecordType = (BRecordType) originalType; LinkedHashMap<String, BField> fields = new LinkedHashMap<>(); for (BField origField : originalRecordType.fields.values()) { org.wso2.ballerinalang.compiler.util.Name origFieldName = origField.name; String nameString = origFieldName.value; if (!validateRecordFieldDefaultValueForIntersection(diagnosticContext, origField, originalRecordType)) { return false; } BType recordFieldType = validateRecordField(diagnosticContext, newType, origField, constraint, env); if (recordFieldType == symTable.semanticError) { return false; } BVarSymbol recordFieldSymbol = new BVarSymbol(origField.symbol.flags, origFieldName, env.enclPkg.packageID, recordFieldType, intersectionRecordSymbol, origField.pos, SOURCE); if (recordFieldType == symTable.neverType && Symbols.isFlagOn(recordFieldSymbol.flags, Flags.OPTIONAL)) { recordFieldSymbol.flags &= (~Flags.REQUIRED); recordFieldSymbol.flags |= Flags.OPTIONAL; } if (recordFieldType.tag == TypeTags.INVOKABLE && recordFieldType.tsymbol != null) { BInvokableTypeSymbol tsymbol = (BInvokableTypeSymbol) recordFieldType.tsymbol; BInvokableSymbol invokableSymbol = (BInvokableSymbol) recordFieldSymbol; invokableSymbol.params = tsymbol.params == null ? null : new ArrayList<>(tsymbol.params); invokableSymbol.restParam = tsymbol.restParam; invokableSymbol.retType = tsymbol.returnType; invokableSymbol.flags = tsymbol.flags; } fields.put(nameString, new BField(origFieldName, null, recordFieldSymbol)); intersectionRecordSymbol.scope.define(origFieldName, recordFieldSymbol); } newType.fields.putAll(fields); return true; } private boolean validateRecordFieldDefaultValueForIntersection(IntersectionContext diagnosticContext, BField field, BRecordType recordType) { if (field.symbol != null && field.symbol.isDefaultable && !diagnosticContext.ignoreDefaultValues) { diagnosticContext.logError(DiagnosticErrorCode.INTERSECTION_NOT_ALLOWED_WITH_TYPE, recordType, field.name); return false; } return true; } private BType validateRecordField(IntersectionContext intersectionContext, BRecordType newType, BField origField, BType constraint, SymbolEnv env) { if (hasField(newType, origField)) { return validateOverlappingFields(newType, origField); } if (constraint == null) { return origField.type; } BType fieldType = getTypeIntersection(intersectionContext, origField.type, constraint, env); if (fieldType.tag == TypeTags.NEVER && !Symbols.isOptional(origField.symbol)) { return symTable.semanticError; } if (fieldType != symTable.semanticError) { return fieldType; } if (Symbols.isOptional(origField.symbol)) { return symTable.neverType; } return symTable.semanticError; } private boolean hasField(BRecordType recordType, BField origField) { return recordType.fields.containsKey(origField.name.value); } private BType validateOverlappingFields(BRecordType newType, BField origField) { if (!hasField(newType, origField)) { return origField.type; } BField overlappingField = newType.fields.get(origField.name.value); if (isAssignable(overlappingField.type, origField.type)) { return overlappingField.type; } if (isAssignable(origField.type, overlappingField.type)) { return origField.type; } return symTable.semanticError; } private void removeErrorFromReadonlyType(List<BType> 
remainingTypes) { Iterator<BType> remainingIterator = remainingTypes.listIterator(); boolean addAnyAndReadOnly = false; while (remainingIterator.hasNext()) { BType remainingType = remainingIterator.next(); if (remainingType.tag != TypeTags.READONLY) { continue; } remainingIterator.remove(); addAnyAndReadOnly = true; } if (addAnyAndReadOnly) { remainingTypes.add(symTable.anyAndReadonly); } } private BType getRemainingType(BUnionType originalType, List<BType> removeTypes) { List<BType> remainingTypes = getAllTypes(originalType, true); boolean hasErrorToRemove = false; for (BType removeType : removeTypes) { remainingTypes.removeIf(type -> isAssignable(type, removeType)); if (!hasErrorToRemove && removeType.tag == TypeTags.ERROR) { hasErrorToRemove = true; } } if (hasErrorToRemove) { removeErrorFromReadonlyType(remainingTypes); } List<BType> finiteTypesToRemove = new ArrayList<>(); List<BType> finiteTypesToAdd = new ArrayList<>(); for (BType remainingType : remainingTypes) { if (remainingType.tag == TypeTags.FINITE) { BFiniteType finiteType = (BFiniteType) remainingType; finiteTypesToRemove.add(finiteType); BType remainingTypeWithMatchesRemoved = getRemainingType(finiteType, removeTypes); if (remainingTypeWithMatchesRemoved != symTable.semanticError) { finiteTypesToAdd.add(remainingTypeWithMatchesRemoved); } } } remainingTypes.removeAll(finiteTypesToRemove); remainingTypes.addAll(finiteTypesToAdd); if (remainingTypes.size() == 1) { return remainingTypes.get(0); } if (remainingTypes.isEmpty()) { return symTable.nullSet; } return BUnionType.create(null, new LinkedHashSet<>(remainingTypes)); } private BType getRemainingType(BFiniteType originalType, List<BType> removeTypes) { Set<BLangExpression> remainingValueSpace = new LinkedHashSet<>(); for (BLangExpression valueExpr : originalType.getValueSpace()) { boolean matchExists = false; for (BType remType : removeTypes) { if (isAssignable(valueExpr.getBType(), remType) || isAssignableToFiniteType(remType, (BLangLiteral) valueExpr)) { matchExists = true; break; } } if (!matchExists) { remainingValueSpace.add(valueExpr); } } if (remainingValueSpace.isEmpty()) { return symTable.semanticError; } BTypeSymbol finiteTypeSymbol = Symbols.createTypeSymbol(SymTag.FINITE_TYPE, originalType.tsymbol.flags, names.fromString("$anonType$" + UNDERSCORE + finiteTypeCount++), originalType.tsymbol.pkgID, null, originalType.tsymbol.owner, originalType.tsymbol.pos, VIRTUAL); BFiniteType intersectingFiniteType = new BFiniteType(finiteTypeSymbol, remainingValueSpace); finiteTypeSymbol.type = intersectingFiniteType; return intersectingFiniteType; } public BType getSafeType(BType bType, boolean liftNil, boolean liftError) { BType type = getReferredType(bType); if (liftNil) { switch (type.tag) { case TypeTags.JSON: return new BJSONType((BJSONType) type, false); case TypeTags.ANY: return new BAnyType(type.tag, type.tsymbol, false); case TypeTags.ANYDATA: return new BAnydataType((BAnydataType) type, false); case TypeTags.READONLY: if (liftError) { return symTable.anyAndReadonly; } return new BReadonlyType(type.tag, type.tsymbol, false); } } if (type.tag != TypeTags.UNION) { return bType; } BUnionType unionType = (BUnionType) type; LinkedHashSet<BType> memTypes = new LinkedHashSet<>(unionType.getMemberTypes()); BUnionType errorLiftedType = BUnionType.create(null, memTypes); if (liftNil) { errorLiftedType.remove(symTable.nilType); } if (liftError) { LinkedHashSet<BType> bTypes = new LinkedHashSet<>(); for (BType t : errorLiftedType.getMemberTypes()) { if (t.tag != TypeTags.ERROR) 
{ bTypes.add(t); } } memTypes = bTypes; errorLiftedType = BUnionType.create(null, memTypes); } if (errorLiftedType.getMemberTypes().size() == 1) { return errorLiftedType.getMemberTypes().toArray(new BType[0])[0]; } if (errorLiftedType.getMemberTypes().size() == 0) { return symTable.semanticError; } return errorLiftedType; } public List<BType> getAllTypes(BType type, boolean getReferenced) { if (type.tag != TypeTags.UNION) { if (getReferenced && type.tag == TypeTags.TYPEREFDESC) { return getAllTypes(((BTypeReferenceType) type).referredType, true); } else { return Lists.of(type); } } List<BType> memberTypes = new LinkedList<>(); ((BUnionType) type).getMemberTypes().forEach(memberType -> memberTypes.addAll(getAllTypes(memberType, true))); return memberTypes; } public boolean isAllowedConstantType(BType type) { switch (type.tag) { case TypeTags.BOOLEAN: case TypeTags.INT: case TypeTags.BYTE: case TypeTags.FLOAT: case TypeTags.DECIMAL: case TypeTags.STRING: case TypeTags.NIL: return true; case TypeTags.MAP: return isAllowedConstantType(((BMapType) type).constraint); case TypeTags.FINITE: BLangExpression finiteValue = ((BFiniteType) type).getValueSpace().toArray(new BLangExpression[0])[0]; return isAllowedConstantType(finiteValue.getBType()); case TypeTags.TYPEREFDESC: return isAllowedConstantType(((BTypeReferenceType) type).referredType); default: return false; } } public boolean isValidLiteral(BLangLiteral literal, BType targetType) { BType literalType = literal.getBType(); if (literalType.tag == targetType.tag) { return true; } switch (targetType.tag) { case TypeTags.BYTE: return literalType.tag == TypeTags.INT && isByteLiteralValue((Long) literal.value); case TypeTags.DECIMAL: return literalType.tag == TypeTags.FLOAT || literalType.tag == TypeTags.INT; case TypeTags.FLOAT: return literalType.tag == TypeTags.INT; case TypeTags.SIGNED32_INT: return literalType.tag == TypeTags.INT && isSigned32LiteralValue((Long) literal.value); case TypeTags.SIGNED16_INT: return literalType.tag == TypeTags.INT && isSigned16LiteralValue((Long) literal.value); case TypeTags.SIGNED8_INT: return literalType.tag == TypeTags.INT && isSigned8LiteralValue((Long) literal.value); case TypeTags.UNSIGNED32_INT: return literalType.tag == TypeTags.INT && isUnsigned32LiteralValue((Long) literal.value); case TypeTags.UNSIGNED16_INT: return literalType.tag == TypeTags.INT && isUnsigned16LiteralValue((Long) literal.value); case TypeTags.UNSIGNED8_INT: return literalType.tag == TypeTags.INT && isUnsigned8LiteralValue((Long) literal.value); case TypeTags.CHAR_STRING: return literalType.tag == TypeTags.STRING && isCharLiteralValue((String) literal.value); default: return false; } } /** * Validate if the return type of the given function is a subtype of `error?`, containing `()`. 
* * @param function The function of which the return type should be validated * @param diagnosticCode The code to log if the return type is invalid */ public void validateErrorOrNilReturn(BLangFunction function, DiagnosticCode diagnosticCode) { BType returnType = function.returnTypeNode.getBType(); if (returnType.tag == TypeTags.NIL) { return; } if (returnType.tag == TypeTags.UNION) { Set<BType> memberTypes = getEffectiveMemberTypes(((BUnionType) returnType)); if (returnType.isNullable() && memberTypes.stream().allMatch(type -> type.tag == TypeTags.NIL || type.tag == TypeTags.ERROR)) { return; } } dlog.error(function.returnTypeNode.pos, diagnosticCode, function.returnTypeNode.getBType().toString()); } /** * Type vector of size two, to hold the source and the target types. * * @since 0.982.0 */ private static class TypePair { BType sourceType; BType targetType; public TypePair(BType sourceType, BType targetType) { this.sourceType = sourceType; this.targetType = targetType; } @Override public boolean equals(Object obj) { if (!(obj instanceof TypePair)) { return false; } TypePair other = (TypePair) obj; return this.sourceType.equals(other.sourceType) && this.targetType.equals(other.targetType); } @Override public int hashCode() { return Objects.hash(sourceType, targetType); } } /** * A functional interface for parameterizing the type of type checking that needs to be done on the source and * target types. * * @since 0.995.0 */ private interface TypeEqualityPredicate { boolean test(BType source, BType target, Set<TypePair> unresolvedTypes); } public boolean hasFillerValue(BType type) { switch (type.tag) { case TypeTags.INT: case TypeTags.BYTE: case TypeTags.FLOAT: case TypeTags.DECIMAL: case TypeTags.STRING: case TypeTags.BOOLEAN: case TypeTags.JSON: case TypeTags.XML: case TypeTags.NIL: case TypeTags.TABLE: case TypeTags.ANYDATA: case TypeTags.MAP: case TypeTags.ANY: case TypeTags.NEVER: return true; case TypeTags.ARRAY: return checkFillerValue((BArrayType) type); case TypeTags.FINITE: return checkFillerValue((BFiniteType) type); case TypeTags.UNION: return checkFillerValue((BUnionType) type); case TypeTags.OBJECT: return checkFillerValue((BObjectType) type); case TypeTags.RECORD: return checkFillerValue((BRecordType) type); case TypeTags.TUPLE: BTupleType tupleType = (BTupleType) type; if (tupleType.isCyclic) { return false; } return tupleType.getTupleTypes().stream().allMatch(eleType -> hasFillerValue(eleType)); case TypeTags.TYPEREFDESC: return hasFillerValue(getReferredType(type)); default: if (TypeTags.isIntegerTypeTag(type.tag)) { return true; } return false; } } private boolean checkFillerValue(BObjectType type) { if ((type.tsymbol.flags & Flags.CLASS) != Flags.CLASS) { return false; } BAttachedFunction initFunction = ((BObjectTypeSymbol) type.tsymbol).initializerFunc; if (initFunction == null) { return true; } if (initFunction.symbol.getReturnType().getKind() != TypeKind.NIL) { return false; } for (BVarSymbol bVarSymbol : initFunction.symbol.getParameters()) { if (!bVarSymbol.isDefaultable) { return false; } } return true; } /** * This will handle two types. Singleton: as a singleton can have only one value, that value should itself be a valid filler * value. Union: 1. if nil is a member, it is the filler value 2.
else all the values should belong to the same type and * the default value for that type should be a member of the union. Precondition: the value space should have at least * one element. * * @param type BFiniteType union or finite * @return boolean whether the type has a valid filler value or not */ private boolean checkFillerValue(BFiniteType type) { if (type.isNullable()) { return true; } if (type.getValueSpace().size() == 1) { return true; } Iterator iterator = type.getValueSpace().iterator(); BLangExpression firstElement = (BLangExpression) iterator.next(); boolean defaultFillValuePresent = isImplicitDefaultValue(firstElement); while (iterator.hasNext()) { BLangExpression value = (BLangExpression) iterator.next(); if (!isSameBasicType(value.getBType(), firstElement.getBType())) { return false; } if (!defaultFillValuePresent && isImplicitDefaultValue(value)) { defaultFillValuePresent = true; } } return defaultFillValuePresent; } private boolean hasImplicitDefaultValue(Set<BLangExpression> valueSpace) { for (BLangExpression expression : valueSpace) { if (isImplicitDefaultValue(expression)) { return true; } } return false; } private boolean checkFillerValue(BUnionType type) { if (type.isNullable()) { return true; } Set<BType> memberTypes = new HashSet<>(); boolean hasFillerValue = false; boolean defaultValuePresent = false; boolean finiteTypePresent = false; for (BType member : getAllTypes(type, true)) { if (member.tag == TypeTags.FINITE) { Set<BType> uniqueValues = getValueTypes(((BFiniteType) member).getValueSpace()); memberTypes.addAll(uniqueValues); if (!defaultValuePresent && hasImplicitDefaultValue(((BFiniteType) member).getValueSpace())) { defaultValuePresent = true; } finiteTypePresent = true; } else { memberTypes.add(member); } if (!hasFillerValue && hasFillerValue(member)) { hasFillerValue = true; } } if (!hasFillerValue) { return false; } Iterator<BType> iterator = memberTypes.iterator(); BType firstMember = iterator.next(); while (iterator.hasNext()) { if (!isSameBasicType(firstMember, iterator.next())) { return false; } } if (finiteTypePresent) { return defaultValuePresent; } return true; } private boolean isSameBasicType(BType source, BType target) { if (isSameType(source, target)) { return true; } if (TypeTags.isIntegerTypeTag(source.tag) && TypeTags.isIntegerTypeTag(target.tag)) { return true; } return false; } private Set<BType> getValueTypes(Set<BLangExpression> valueSpace) { Set<BType> uniqueType = new HashSet<>(); for (BLangExpression expression : valueSpace) { uniqueType.add(expression.getBType()); } return uniqueType; } private boolean isImplicitDefaultValue(BLangExpression expression) { if ((expression.getKind() == NodeKind.LITERAL) || (expression.getKind() == NodeKind.NUMERIC_LITERAL)) { BLangLiteral literalExpression = (BLangLiteral) expression; BType literalExprType = literalExpression.getBType(); Object value = literalExpression.getValue(); switch (literalExprType.getKind()) { case INT: case BYTE: return value.equals(Long.valueOf(0)); case STRING: return value == null || value.equals(""); case DECIMAL: case FLOAT: return value.equals(String.valueOf(0.0)); case BOOLEAN: return value.equals(Boolean.valueOf(false)); case NIL: return true; default: return false; } } return false; } private boolean checkFillerValue(BRecordType type) { for (BField field : type.fields.values()) { if (Symbols.isFlagOn(field.symbol.flags, Flags.OPTIONAL)) { continue; } if (Symbols.isFlagOn(field.symbol.flags, Flags.REQUIRED)) { return false; } } return true; } private boolean
checkFillerValue(BArrayType type) { if (type.size == -1) { return true; } return hasFillerValue(type.eType); } /** * Get result type of the query output. * * @param type type of query expression. * @return result type. */ public BType resolveExprType(BType type) { switch (type.tag) { case TypeTags.STREAM: return ((BStreamType) type).constraint; case TypeTags.TABLE: return ((BTableType) type).constraint; case TypeTags.ARRAY: return ((BArrayType) type).eType; case TypeTags.UNION: List<BType> exprTypes = new ArrayList<>(((BUnionType) type).getMemberTypes()); for (BType returnType : exprTypes) { switch (returnType.tag) { case TypeTags.STREAM: return ((BStreamType) returnType).constraint; case TypeTags.TABLE: return ((BTableType) returnType).constraint; case TypeTags.ARRAY: return ((BArrayType) returnType).eType; case TypeTags.STRING: case TypeTags.XML: return returnType; } } default: return type; } } /** * Check whether a type is an ordered type. * * @param type type. * @param hasCycle whether there is a cycle. * @return boolean whether the type is an ordered type or not. */ public boolean isOrderedType(BType type, boolean hasCycle) { switch (type.tag) { case TypeTags.UNION: BUnionType unionType = (BUnionType) type; if (hasCycle) { return true; } if (unionType.isCyclic) { hasCycle = true; } Set<BType> memberTypes = unionType.getMemberTypes(); boolean allMembersOrdered = false; BType firstTypeInUnion = getReferredType(memberTypes.iterator().next()); for (BType memType : memberTypes) { memType = getReferredType(memType); if (memType.tag == TypeTags.FINITE && firstTypeInUnion.tag == TypeTags.FINITE) { Set<BLangExpression> valSpace = ((BFiniteType) firstTypeInUnion).getValueSpace(); BType baseExprType = valSpace.iterator().next().getBType(); if (!checkValueSpaceHasSameType((BFiniteType) memType, baseExprType)) { return false; } } else if (memType.tag == TypeTags.UNION) { return isOrderedType(memType, hasCycle); } else if (memType.tag != firstTypeInUnion.tag && memType.tag != TypeTags.NIL && !isIntOrStringType(memType.tag, firstTypeInUnion.tag)) { return false; } allMembersOrdered = isOrderedType(memType, hasCycle); if (!allMembersOrdered) { break; } } return allMembersOrdered; case TypeTags.ARRAY: BType elementType = ((BArrayType) type).eType; return isOrderedType(elementType, hasCycle); case TypeTags.TUPLE: List<BType> tupleMemberTypes = ((BTupleType) type).tupleTypes; for (BType memType : tupleMemberTypes) { if (!isOrderedType(memType, hasCycle)) { return false; } } BType restType = ((BTupleType) type).restType; return restType == null || isOrderedType(restType, hasCycle); case TypeTags.FINITE: boolean isValueSpaceOrdered = false; Set<BLangExpression> valSpace = ((BFiniteType) type).getValueSpace(); BType baseExprType = valSpace.iterator().next().getBType(); for (BLangExpression expr : valSpace) { if (!checkValueSpaceHasSameType((BFiniteType) type, baseExprType)) { return false; } isValueSpaceOrdered = isOrderedType(expr.getBType(), hasCycle); if (!isValueSpaceOrdered) { break; } } return isValueSpaceOrdered; case TypeTags.TYPEREFDESC: return isOrderedType(getReferredType(type), hasCycle); case TypeTags.INTERSECTION: return isOrderedType(getEffectiveTypeForIntersection(type), hasCycle); default: return isSimpleBasicType(type.tag); } } private boolean isIntOrStringType(int firstTypeTag, int secondTypeTag) { return ((TypeTags.isIntegerTypeTag(firstTypeTag) || firstTypeTag == TypeTags.BYTE) && (TypeTags.isIntegerTypeTag(secondTypeTag) || secondTypeTag == TypeTags.BYTE)) || 
((TypeTags.isStringTypeTag(firstTypeTag)) && (TypeTags.isStringTypeTag(secondTypeTag))); } public boolean isUnionOfSimpleBasicTypes(BType bType) { BType type = getReferredType(bType); if (type.tag == TypeTags.UNION) { Set<BType> memberTypes = ((BUnionType) type).getMemberTypes(); for (BType memType : memberTypes) { memType = getReferredType(memType); if (!isSimpleBasicType(memType.tag)) { return false; } } return true; } return isSimpleBasicType(type.tag); } public BType findCompatibleType(BType type) { switch (type.tag) { case TypeTags.DECIMAL: case TypeTags.FLOAT: case TypeTags.XML: case TypeTags.XML_TEXT: return type; case TypeTags.INT: case TypeTags.BYTE: case TypeTags.SIGNED32_INT: case TypeTags.SIGNED16_INT: case TypeTags.SIGNED8_INT: case TypeTags.UNSIGNED32_INT: case TypeTags.UNSIGNED16_INT: case TypeTags.UNSIGNED8_INT: return symTable.intType; case TypeTags.STRING: case TypeTags.CHAR_STRING: return symTable.stringType; case TypeTags.UNION: LinkedHashSet<BType> memberTypes = ((BUnionType) type).getMemberTypes(); return findCompatibleType(memberTypes.iterator().next()); case TypeTags.TYPEREFDESC: return findCompatibleType(((BTypeReferenceType) type).referredType); default: Set<BLangExpression> valueSpace = ((BFiniteType) type).getValueSpace(); return findCompatibleType(valueSpace.iterator().next().getBType()); } } public boolean isNonNilSimpleBasicTypeOrString(BType bType) { BType type = getReferredType(bType); if (type.tag == TypeTags.UNION) { Set<BType> memberTypes = ((BUnionType) type).getMemberTypes(); for (BType member : memberTypes) { BType memType = getReferredType(member); if (memType.tag == TypeTags.NIL || !isSimpleBasicType(memType.tag)) { return false; } } return true; } return type.tag != TypeTags.NIL && isSimpleBasicType(type.tag); } public boolean isSubTypeOfReadOnlyOrIsolatedObjectUnion(BType bType) { BType type = getReferredType(bType); if (isInherentlyImmutableType(type) || Symbols.isFlagOn(type.flags, Flags.READONLY)) { return true; } int tag = type.tag; if (tag == TypeTags.OBJECT) { return isIsolated(type); } if (tag != TypeTags.UNION) { return false; } for (BType memberType : ((BUnionType) type).getMemberTypes()) { if (!isSubTypeOfReadOnlyOrIsolatedObjectUnion(memberType)) { return false; } } return true; } private boolean isIsolated(BType type) { return Symbols.isFlagOn(type.flags, Flags.ISOLATED); } private boolean isImmutable(BType type) { return Symbols.isFlagOn(type.flags, Flags.READONLY); } BType getTypeWithoutNil(BType type) { BType constraint = getReferredType(type); if (constraint.tag != TypeTags.UNION) { return constraint; } BUnionType unionType = (BUnionType) constraint; if (!unionType.isNullable()) { return unionType; } List<BType> nonNilTypes = new ArrayList<>(); for (BType memberType : unionType.getMemberTypes()) { if (!isAssignable(memberType, symTable.nilType)) { nonNilTypes.add(memberType); } } if (nonNilTypes.size() == 1) { return nonNilTypes.get(0); } return BUnionType.create(null, new LinkedHashSet<>(nonNilTypes)); } public boolean isNeverTypeOrStructureTypeWithARequiredNeverMember(BType type) { if (type == null) { return false; } Set<BType> visitedTypeSet = new HashSet<>(); visitedTypeSet.add(type); return isNeverTypeOrStructureTypeWithARequiredNeverMember(type, visitedTypeSet); } boolean isNeverTypeOrStructureTypeWithARequiredNeverMember(BType type, Set<BType> visitedTypeSet) { switch (type.tag) { case TypeTags.NEVER: return true; case TypeTags.RECORD: for (BField field : ((BRecordType) type).fields.values()) { if 
((SymbolFlags.isFlagOn(field.symbol.flags, SymbolFlags.REQUIRED) || !SymbolFlags.isFlagOn(field.symbol.flags, SymbolFlags.OPTIONAL)) && !visitedTypeSet.contains(field.type) && isNeverTypeOrStructureTypeWithARequiredNeverMember(field.type, visitedTypeSet)) { return true; } } return false; case TypeTags.TUPLE: BTupleType tupleType = (BTupleType) type; List<BType> tupleTypes = tupleType.tupleTypes; for (BType mem : tupleTypes) { if (!visitedTypeSet.add(mem)) { continue; } if (isNeverTypeOrStructureTypeWithARequiredNeverMember(mem, visitedTypeSet)) { return true; } } return false; case TypeTags.ARRAY: BArrayType arrayType = (BArrayType) type; visitedTypeSet.add(arrayType.eType); return arrayType.state != BArrayState.OPEN && isNeverTypeOrStructureTypeWithARequiredNeverMember(arrayType.eType, visitedTypeSet); case TypeTags.TYPEREFDESC: visitedTypeSet.add(type); return isNeverTypeOrStructureTypeWithARequiredNeverMember(getReferredType(type), visitedTypeSet); default: return false; } } boolean isSingletonType(BType bType) { BType type = getReferredType(bType); return type.tag == TypeTags.FINITE && ((BFiniteType) type).getValueSpace().size() == 1; } boolean isSameSingletonType(BFiniteType type1, BFiniteType type2) { BLangLiteral expr1 = (BLangLiteral) type1.getValueSpace().iterator().next(); BLangLiteral expr2 = (BLangLiteral) type2.getValueSpace().iterator().next(); return expr1.value.equals(expr2.value); } private static class ListenerValidationModel { private final Types types; private final SymbolTable symtable; private final BType serviceNameType; boolean attachFound; boolean detachFound; boolean startFound; boolean gracefulStopFound; boolean immediateStopFound; public ListenerValidationModel(Types types, SymbolTable symTable) { this.types = types; this.symtable = symTable; this.serviceNameType = BUnionType.create(null, symtable.stringType, symtable.arrayStringType, symtable.nilType); } boolean isValidListener() { return attachFound && detachFound && startFound && gracefulStopFound && immediateStopFound; } private boolean checkMethods(List<BAttachedFunction> rhsFuncs) { for (BAttachedFunction func : rhsFuncs) { switch (func.funcName.value) { case "attach": if (!checkAttachMethod(func)) { return false; } break; case "detach": if (!checkDetachMethod(func)) { return false; } break; case "start": if (!checkStartMethod(func)) { return true; } break; case "gracefulStop": if (!checkGracefulStop(func)) { return false; } break; case "immediateStop": if (!checkImmediateStop(func)) { return false; } break; } } return isValidListener(); } private boolean emptyParamList(BAttachedFunction func) { return func.type.paramTypes.isEmpty() && func.type.restType != symtable.noType; } private boolean publicAndReturnsErrorOrNil(BAttachedFunction func) { if (!Symbols.isPublic(func.symbol)) { return false; } return types.isAssignable(func.type.retType, symtable.errorOrNilType); } private boolean isPublicNoParamReturnsErrorOrNil(BAttachedFunction func) { if (!publicAndReturnsErrorOrNil(func)) { return false; } return emptyParamList(func); } private boolean checkImmediateStop(BAttachedFunction func) { return immediateStopFound = isPublicNoParamReturnsErrorOrNil(func); } private boolean checkGracefulStop(BAttachedFunction func) { return gracefulStopFound = isPublicNoParamReturnsErrorOrNil(func); } private boolean checkStartMethod(BAttachedFunction func) { return startFound = publicAndReturnsErrorOrNil(func); } private boolean checkDetachMethod(BAttachedFunction func) { if (!publicAndReturnsErrorOrNil(func)) { return 
false; } if (func.type.paramTypes.size() != 1) { return false; } return detachFound = isServiceObject(func.type.paramTypes.get(0)); } private boolean checkAttachMethod(BAttachedFunction func) { if (!publicAndReturnsErrorOrNil(func)) { return false; } if (func.type.paramTypes.size() != 2) { return false; } BType firstParamType = func.type.paramTypes.get(0); if (!isServiceObject(firstParamType)) { return false; } BType secondParamType = func.type.paramTypes.get(1); boolean sameType = types.isAssignable(secondParamType, this.serviceNameType); return attachFound = sameType; } private boolean isServiceObject(BType bType) { BType type = types.getReferredType(bType); if (type.tag == TypeTags.UNION) { for (BType memberType : ((BUnionType) type).getMemberTypes()) { if (!isServiceObject(memberType)) { return false; } } return true; } if (type.tag != TypeTags.OBJECT) { return false; } return Symbols.isService(type.tsymbol); } } /** * Intersection type validation helper. * * @since 2.0.0 */ public static class IntersectionContext { Location lhsPos; Location rhsPos; BLangDiagnosticLog dlog; ContextOption contextOption; boolean ignoreDefaultValues; boolean createTypeDefs; boolean preferNonGenerativeIntersection; private IntersectionContext(BLangDiagnosticLog diaglog, Location left, Location right) { this.dlog = diaglog; this.lhsPos = left; this.rhsPos = right; this.contextOption = ContextOption.NON; this.ignoreDefaultValues = false; this.createTypeDefs = true; this.preferNonGenerativeIntersection = false; } /** * Create {@link IntersectionContext} used for calculating the intersection type when the user * explicitly writes an intersection type. This will produce error messages explaining why there is no intersection * between two types. * * @return a {@link IntersectionContext} */ public static IntersectionContext from(BLangDiagnosticLog diaglog, Location left, Location right) { return new IntersectionContext(diaglog, left, right); } /** * Create {@link IntersectionContext} used for calculating the intersection type to see if there * is an intersection between the types. This does not emit error messages explaining why there is no intersection * between two types. This also does not generate a type-def for the calculated intersection type. * Do not use this context to create an intersection type that uses the calculated type for any purpose other * than seeing if there is an intersection. * * @return a {@link IntersectionContext} */ public static IntersectionContext compilerInternalIntersectionTestContext() { IntersectionContext intersectionContext = new IntersectionContext(null, null, null); intersectionContext.ignoreDefaultValues = true; intersectionContext.createTypeDefs = false; return intersectionContext; } /** * Create {@link IntersectionContext} used for calculating the intersection type. * This does not emit error messages explaining why there is no intersection between two types. * * @return a {@link IntersectionContext} */ public static IntersectionContext compilerInternalIntersectionContext() { IntersectionContext diagnosticContext = new IntersectionContext(null, null, null); return diagnosticContext; } /** * Create {@link IntersectionContext} used for checking the existence of a valid intersection, irrespective * of default values. * Type definitions are not created. * This does not emit error messages explaining why there is no intersection between two types.
* * @return a {@link IntersectionContext} */ public static IntersectionContext typeTestIntersectionExistenceContext() { IntersectionContext intersectionContext = new IntersectionContext(null, null, null); intersectionContext.ignoreDefaultValues = true; intersectionContext.preferNonGenerativeIntersection = true; intersectionContext.createTypeDefs = false; return intersectionContext; } /** * Create {@link IntersectionContext} used for creating effective types for the intersection of types, * irrespective of default values. * Type definitions are created. * This does not emit error messages explaining why there is no intersection between two types. * * @return a {@link IntersectionContext} */ public static IntersectionContext typeTestIntersectionCalculationContext() { IntersectionContext intersectionContext = new IntersectionContext(null, null, null); intersectionContext.ignoreDefaultValues = true; intersectionContext.preferNonGenerativeIntersection = true; intersectionContext.createTypeDefs = true; return intersectionContext; } public IntersectionContext switchLeft() { this.contextOption = ContextOption.LEFT; return this; } public IntersectionContext switchRight() { this.contextOption = ContextOption.RIGHT; return this; } private boolean logError(DiagnosticErrorCode diagnosticCode, Object... args) { Location pos = null; if (contextOption == ContextOption.LEFT && lhsPos != null) { pos = lhsPos; } else if (contextOption == ContextOption.RIGHT && rhsPos != null) { pos = rhsPos; } if (pos != null) { dlog.error(pos, diagnosticCode, args); return true; } return false; } } private enum ContextOption { LEFT, RIGHT, NON; } }
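The recursive checks in the Types class above all thread a Set<TypePair> through their calls; TypePair's equals/hashCode make that set a memo of in-flight (source, target) comparisons, which is what stops structural checks on recursive types from looping forever. Below is a minimal, self-contained sketch of that visited-pair pattern, using plain String type names instead of BType; it illustrates the technique only and is not the Ballerina compiler's actual code.

```java
import java.util.HashSet;
import java.util.Objects;
import java.util.Set;

// Sketch of the visited-pair pattern: record the (source, target) pair before
// descending into members; if the same pair shows up again further down the
// recursion, assume the check succeeds instead of recursing forever.
final class TypePair {
    final String sourceType;
    final String targetType;

    TypePair(String sourceType, String targetType) {
        this.sourceType = sourceType;
        this.targetType = targetType;
    }

    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof TypePair)) {
            return false;
        }
        TypePair other = (TypePair) obj;
        return sourceType.equals(other.sourceType) && targetType.equals(other.targetType);
    }

    @Override
    public int hashCode() {
        return Objects.hash(sourceType, targetType);
    }
}

class CycleSafeChecker {
    boolean isAssignable(String source, String target, Set<TypePair> unresolvedTypes) {
        TypePair pair = new TypePair(source, target);
        if (!unresolvedTypes.add(pair)) {
            // Already being checked higher up the call stack (a recursive type):
            // optimistically treat it as assignable to break the cycle.
            return true;
        }
        // Real member-by-member structural checks would go here.
        return source.equals(target);
    }

    public static void main(String[] args) {
        CycleSafeChecker checker = new CycleSafeChecker();
        System.out.println(checker.isAssignable("Node", "Node", new HashSet<>())); // true
    }
}
```

Passing the set as a parameter, as TypeEqualityPredicate.test does above, rather than storing it in a field, means each top-level check starts from a fresh, empty memo.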
Could you extract the common code for initializing the environment?
public void testNonMaterialization() throws Exception { StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(); env.enableCheckpointing(10) .enableChangelogStateBackend(true) .getCheckpointConfig() .setCheckpointStorage(TEMPORARY_FOLDER.newFolder().toURI()); env.setStateBackend(new DelegatedStateBackendWrapper(delegatedStateBackend, t -> t)) .setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 0)); env.configure( new Configuration() .set( StateChangelogOptions.PERIODIC_MATERIALIZATION_INTERVAL, Duration.ofMinutes(3))); waitAndAssert( env, buildStreamGraph( env, new ControlledSource() { @Override protected void beforeElement() throws Exception { if (getRuntimeContext().getAttemptNumber() == 0 && currentIndex == TOTAL_ELEMENTS / 2) { waitWhile(() -> completedCheckpointNum.get() <= 0); throwArtificialFailure(); } } })); }
env.configure(
public void testNonMaterialization() throws Exception { File checkpointFolder = TEMPORARY_FOLDER.newFolder(); SharedReference<AtomicBoolean> hasMaterialization = sharedObjects.add(new AtomicBoolean(true)); StreamExecutionEnvironment env = getEnv(delegatedStateBackend, checkpointFolder, 1000, 1, Long.MAX_VALUE, 0); waitAndAssert( buildJobGraph( env, new ControlledSource() { @Override protected void beforeElement(SourceContext<Integer> ctx) throws Exception { if (getRuntimeContext().getAttemptNumber() == 0 && currentIndex == TOTAL_ELEMENTS / 2) { waitWhile(() -> completedCheckpointNum.get() <= 0); hasMaterialization .get() .compareAndSet( true, !getAllStateHandleId(checkpointFolder) .isEmpty()); throwArtificialFailure(); } } }, generateJobID())); Preconditions.checkState(!hasMaterialization.get().get()); }
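The refactored test calls a shared `getEnv` helper whose body is not shown in this record. A minimal sketch of what such a helper could look like, inferring the parameter order (state backend, checkpoint folder, checkpoint interval, restart attempts, materialization interval, max allowed materialization failures) from the call sites; the name `getEnv` comes from the calls above, but the exact signature and types are assumptions:

```java
// Hypothetical consolidation of the repeated environment setup; the real helper
// presumably lives in ChangelogPeriodicMaterializationTestBase and may differ.
private StreamExecutionEnvironment getEnv(
        StateBackend stateBackend,
        File checkpointFolder,
        long checkpointIntervalMs,
        int restartAttempts,
        long materializationIntervalMs, // Long.MAX_VALUE effectively disables materialization
        int materializationMaxFailures) {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.enableCheckpointing(checkpointIntervalMs)
            .enableChangelogStateBackend(true)
            .getCheckpointConfig()
            .setCheckpointStorage(checkpointFolder.toURI());
    env.setStateBackend(stateBackend)
            .setRestartStrategy(RestartStrategies.fixedDelayRestart(restartAttempts, 0));
    env.configure(
            new Configuration()
                    .set(
                            StateChangelogOptions.PERIODIC_MATERIALIZATION_INTERVAL,
                            Duration.ofMillis(materializationIntervalMs))
                    .set(
                            StateChangelogOptions.MATERIALIZATION_MAX_FAILURES_ALLOWED,
                            materializationMaxFailures));
    return env;
}
```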
class ChangelogPeriodicMaterializationITCase extends ChangelogPeriodicMaterializationTestBase { private static final AtomicBoolean triggerDelegatedSnapshot = new AtomicBoolean(); public ChangelogPeriodicMaterializationITCase(AbstractStateBackend delegatedStateBackend) { super(delegatedStateBackend); } @Before public void setup() throws Exception { super.setup(); triggerDelegatedSnapshot.set(false); } /** Recovery from checkpoint only containing non-materialized state. */ @Test /** Recovery from checkpoint containing non-materialized state and materialized state. */ @Test public void testMaterialization() throws Exception { StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(); env.enableCheckpointing(10) .enableChangelogStateBackend(true) .getCheckpointConfig() .setCheckpointStorage(TEMPORARY_FOLDER.newFolder().toURI()); SerializableFunctionWithException<RunnableFuture<SnapshotResult<KeyedStateHandle>>> snapshotResultConsumer = snapshotResultFuture -> { PENDING_MATERIALIZATION.add(snapshotResultFuture); return snapshotResultFuture; }; env.setStateBackend( new DelegatedStateBackendWrapper( delegatedStateBackend, snapshotResultConsumer)) .setRestartStrategy(RestartStrategies.fixedDelayRestart(2, 0)); env.configure( new Configuration() .set( StateChangelogOptions.PERIODIC_MATERIALIZATION_INTERVAL, Duration.ofMillis(10))); waitAndAssert( env, buildStreamGraph( env, new ControlledSource() { @Override protected void beforeElement() throws Exception { if (getRuntimeContext().getAttemptNumber() == 0 && currentIndex == TOTAL_ELEMENTS / 4) { waitWhile( () -> completedCheckpointNum.get() <= 0 || PENDING_MATERIALIZATION.stream() .noneMatch(Future::isDone)); PENDING_MATERIALIZATION.clear(); throwArtificialFailure(); } else if (getRuntimeContext().getAttemptNumber() == 1 && currentIndex == TOTAL_ELEMENTS / 2) { waitWhile( () -> completedCheckpointNum.get() <= 1 || PENDING_MATERIALIZATION.stream() .noneMatch(Future::isDone)); throwArtificialFailure(); } } })); } @Test public void testFailedMaterialization() throws Exception { StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(); env.enableCheckpointing(10) .enableChangelogStateBackend(true) .getCheckpointConfig() .setCheckpointStorage(TEMPORARY_FOLDER.newFolder().toURI()); env.setStateBackend( new DelegatedStateBackendWrapper( delegatedStateBackend, snapshotResultFuture -> { RunnableFuture<SnapshotResult<KeyedStateHandle>> snapshotResultFutureWrapper = new FutureTaskWithException<>( () -> { SnapshotResult<KeyedStateHandle> snapshotResult = snapshotResultFuture .get(); if (PENDING_MATERIALIZATION.size() == 0) { throw new RuntimeException(); } else { return snapshotResult; } }); PENDING_MATERIALIZATION.add(snapshotResultFutureWrapper); return snapshotResultFutureWrapper; })) .setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 0)); env.configure( new Configuration() .set( StateChangelogOptions.PERIODIC_MATERIALIZATION_INTERVAL, Duration.ofMillis(20)) .set(StateChangelogOptions.MATERIALIZATION_MAX_FAILURES_ALLOWED, 1)); waitAndAssert( env, buildStreamGraph( env, new ControlledSource() { @Override protected void beforeElement() throws Exception { waitWhile( () -> currentIndex >= TOTAL_ELEMENTS / 2 && PENDING_MATERIALIZATION.size() == 0); } })); } }
class ChangelogPeriodicMaterializationITCase extends ChangelogPeriodicMaterializationTestBase { public ChangelogPeriodicMaterializationITCase(AbstractStateBackend delegatedStateBackend) { super(delegatedStateBackend); } /** Recovery from checkpoint only containing non-materialized state. */ @Test /** Recovery from checkpoint containing non-materialized state and materialized state. */ @Test public void testMaterialization() throws Exception { File checkpointFolder = TEMPORARY_FOLDER.newFolder(); SharedReference<AtomicInteger> currentCheckpointNum = sharedObjects.add(new AtomicInteger()); SharedReference<Set<StateHandleID>> currentMaterializationId = sharedObjects.add(ConcurrentHashMap.newKeySet()); StreamExecutionEnvironment env = getEnv(delegatedStateBackend, checkpointFolder, 100, 2, 50, 0); waitAndAssert( buildJobGraph( env, new ControlledSource() { @Override protected void beforeElement(SourceContext<Integer> ctx) throws Exception { Preconditions.checkState( getRuntimeContext().getAttemptNumber() <= 2); if (getRuntimeContext().getAttemptNumber() == 0 && currentIndex == TOTAL_ELEMENTS / 4) { waitWhile( () -> { if (completedCheckpointNum.get() <= 0) { return true; } Set<StateHandleID> allMaterializationId = getAllStateHandleId(checkpointFolder); if (!allMaterializationId.isEmpty()) { currentMaterializationId .get() .addAll(allMaterializationId); currentCheckpointNum .get() .compareAndSet( 0, completedCheckpointNum.get()); return false; } return true; }); throwArtificialFailure(); } else if (getRuntimeContext().getAttemptNumber() == 1 && currentIndex == TOTAL_ELEMENTS / 2) { waitWhile( () -> { if (completedCheckpointNum.get() <= currentCheckpointNum.get().get()) { return true; } Set<StateHandleID> allMaterializationId = getAllStateHandleId(checkpointFolder); return allMaterializationId.isEmpty() || currentMaterializationId .get() .equals(allMaterializationId); }); throwArtificialFailure(); } } }, generateJobID())); } @Test public void testFailedMaterialization() throws Exception { File checkpointFolder = TEMPORARY_FOLDER.newFolder(); SharedReference<AtomicBoolean> hasFailed = sharedObjects.add(new AtomicBoolean()); SharedReference<Set<StateHandleID>> currentMaterializationId = sharedObjects.add(ConcurrentHashMap.newKeySet()); StreamExecutionEnvironment env = getEnv( new DelegatedStateBackendWrapper( delegatedStateBackend, snapshotResultFuture -> { if (hasFailed.get().compareAndSet(false, true)) { throw new RuntimeException(); } else { return snapshotResultFuture; } }), checkpointFolder, 100, 1, 10, 1); env.setParallelism(1); waitAndAssert( buildJobGraph( env, new ControlledSource() { @Override protected void beforeElement(SourceContext<Integer> ctx) throws Exception { if (currentIndex == TOTAL_ELEMENTS / 8) { waitWhile(() -> !hasFailed.get().get()); } else if (currentIndex == TOTAL_ELEMENTS / 4) { waitWhile( () -> { Set<StateHandleID> allMaterializationId = getAllStateHandleId(checkpointFolder); if (!allMaterializationId.isEmpty()) { currentMaterializationId .get() .addAll(allMaterializationId); return false; } return true; }); } else if (currentIndex == TOTAL_ELEMENTS / 2) { waitWhile( () -> { Set<StateHandleID> allMaterializationId = getAllStateHandleId(checkpointFolder); return allMaterializationId.isEmpty() || currentMaterializationId .get() .equals(allMaterializationId); }); } } }, generateJobID())); } }
The test is now failing to build; please address the precommit failures (e.g. https://github.com/apache/beam/actions/runs/6344373926/job/17234456430?pr=28513).
public void testActiveThreadMetric() throws Exception { int maxThreads = 5; int threadExpirationSec = 60; BoundedQueueExecutor executor = new BoundedQueueExecutor( maxThreads, threadExpirationSec, TimeUnit.SECONDS, maxThreads, 10000000, new ThreadFactoryBuilder() .setNameFormat("DataflowWorkUnits-%d") .setDaemon(true) .build()); StreamingDataflowWorker.ComputationState computationState = new StreamingDataflowWorker.ComputationState( "computation", defaultMapTask(Arrays.asList(makeSourceInstruction(StringUtf8Coder.of()))), executor, ImmutableMap.of(), null); ShardedKey key1Shard1 = ShardedKey.create(ByteString.copyFromUtf8("key1"), 1); MockActiveWork m1 = new MockActiveWork(1) { @Override public void run() { synchronized (this) { this.notify(); } int count = 0; while (!exit) { count += 1; } Thread.currentThread().interrupt(); } }; MockWork m2 = new MockWork(2) { @Override public void run() { synchronized (this) { this.notify(); } try { Thread.sleep(2000); } catch (InterruptedException e) { Thread.currentThread().interrupt(); } } }; MockWork m3 = new MockWork(3) { @Override public void run() { synchronized (this) { this.notify(); } try { Thread.sleep(2000); } catch (InterruptedException e) { Thread.currentThread().interrupt(); } } }; assertEquals(0, executor.activeCount()); assertTrue(computationState.activateWork(key1Shard1, m1)); synchronized (m1) { executor.execute(m1, m1.getWorkItem().getSerializedSize()); m1.wait(); } assertEquals(2, executor.activeCount()); assertTrue(computationState.activateWork(key1Shard1, m2)); assertTrue(computationState.activateWork(key1Shard1, m3)); synchronized (m2) { executor.execute(m2, m2.getWorkItem().getSerializedSize()); m2.wait(); } synchronized (m3) { executor.execute(m3, m3.getWorkItem().getSerializedSize()); m3.wait(); } assertEquals(4, executor.activeCount()); m1.stop(); executor.shutdown(); }
MockActiveWork m1 =
public void testActiveThreadMetric() throws Exception { int maxThreads = 5; int threadExpirationSec = 60; BoundedQueueExecutor executor = new BoundedQueueExecutor( maxThreads, threadExpirationSec, TimeUnit.SECONDS, maxThreads, 10000000, new ThreadFactoryBuilder() .setNameFormat("DataflowWorkUnits-%d") .setDaemon(true) .build()); ComputationState computationState = new ComputationState( "computation", defaultMapTask(Arrays.asList(makeSourceInstruction(StringUtf8Coder.of()))), executor, ImmutableMap.of(), null); ShardedKey key1Shard1 = ShardedKey.create(ByteString.copyFromUtf8("key1"), 1); Consumer<Work> sleepProcessWorkFn = unused -> { synchronized (this) { this.notify(); } int count = 0; while (!stop) { count += 1; } }; Work m2 = createMockWork(2, sleepProcessWorkFn); Work m3 = createMockWork(3, sleepProcessWorkFn); Work m4 = createMockWork(4, sleepProcessWorkFn); assertEquals(0, executor.activeCount()); assertTrue(computationState.activateWork(key1Shard1, m2)); synchronized (this) { executor.execute(m2, m2.getWorkItem().getSerializedSize()); this.wait(); this.wait(); } assertEquals(2, executor.activeCount()); assertTrue(computationState.activateWork(key1Shard1, m3)); assertTrue(computationState.activateWork(key1Shard1, m4)); synchronized (this) { executor.execute(m3, m3.getWorkItem().getSerializedSize()); this.wait(); } assertEquals(3, executor.activeCount()); synchronized (this) { executor.execute(m4, m4.getWorkItem().getSerializedSize()); this.wait(); } assertEquals(4, executor.activeCount()); stop = true; executor.shutdown(); }
class MockActiveWork extends StreamingDataflowWorker.Work { public static volatile boolean exit; public MockActiveWork(long workToken) { super( Windmill.WorkItem.newBuilder().setKey(ByteString.EMPTY).setWorkToken(workToken).build(), Instant::now, Collections.emptyList()); exit = false; } @Override public void run() {} public void stop() { exit = true; } }
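The fix replaces this subclass-per-mock pattern with the `Work.create` factory visible in the updated test class below; a minimal sketch of the same idea, reusing the `Work.create` signature shown there (work item, clock, latency attributions, process function) — the `stop` flag and busy-wait mirror the updated test, and the rest is illustrative:

```java
// A volatile flag shared with the test replaces MockActiveWork.exit.
private volatile boolean stop = false;

Work busyWork =
    Work.create(
        Windmill.WorkItem.newBuilder().setKey(ByteString.EMPTY).setWorkToken(1).build(),
        Instant::now,
        Collections.emptyList(),
        work -> { // Consumer<Work> replaces the overridden run()
          synchronized (this) {
            this.notify(); // signal the test thread that processing has started
          }
          while (!stop) {
            // busy-wait until the test flips the flag
          }
        });
```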
class StreamingDataflowWorkerTest { private static final Logger LOG = LoggerFactory.getLogger(StreamingDataflowWorkerTest.class); private static final IntervalWindow DEFAULT_WINDOW = new IntervalWindow(new Instant(1234), Duration.millis(1000)); private static final IntervalWindow WINDOW_AT_ZERO = new IntervalWindow(new Instant(0), new Instant(1000)); private static final IntervalWindow WINDOW_AT_ONE_SECOND = new IntervalWindow(new Instant(1000), new Instant(2000)); private static final Coder<IntervalWindow> DEFAULT_WINDOW_CODER = IntervalWindow.getCoder(); private static final Coder<Collection<IntervalWindow>> DEFAULT_WINDOW_COLLECTION_CODER = CollectionCoder.of(DEFAULT_WINDOW_CODER); private static final String DEFAULT_COMPUTATION_ID = "computation"; private static final String DEFAULT_MAP_STAGE_NAME = "computation"; private static final String DEFAULT_MAP_SYSTEM_NAME = "computation"; private static final String DEFAULT_OUTPUT_ORIGINAL_NAME = "originalName"; private static final String DEFAULT_OUTPUT_SYSTEM_NAME = "systemName"; private static final String DEFAULT_PARDO_SYSTEM_NAME = "parDo"; private static final String DEFAULT_PARDO_ORIGINAL_NAME = "parDoOriginalName"; private static final String DEFAULT_PARDO_USER_NAME = "parDoUserName"; private static final String DEFAULT_PARDO_STATE_FAMILY = "parDoStateFamily"; private static final String DEFAULT_SOURCE_SYSTEM_NAME = "source"; private static final String DEFAULT_SOURCE_ORIGINAL_NAME = "sourceOriginalName"; private static final String DEFAULT_SINK_SYSTEM_NAME = "sink"; private static final String DEFAULT_SINK_ORIGINAL_NAME = "sinkOriginalName"; private static final String DEFAULT_SOURCE_COMPUTATION_ID = "upstream"; private static final String DEFAULT_KEY_STRING = "key"; private static final long DEFAULT_SHARDING_KEY = 12345; private static final ByteString DEFAULT_KEY_BYTES = ByteString.copyFromUtf8(DEFAULT_KEY_STRING); private static final String DEFAULT_DATA_STRING = "data"; private static final String DEFAULT_DESTINATION_STREAM_ID = "out"; private static final Function<GetDataRequest, GetDataResponse> EMPTY_DATA_RESPONDER = (GetDataRequest request) -> { GetDataResponse.Builder builder = GetDataResponse.newBuilder(); for (ComputationGetDataRequest compRequest : request.getRequestsList()) { ComputationGetDataResponse.Builder compBuilder = builder.addDataBuilder().setComputationId(compRequest.getComputationId()); for (KeyedGetDataRequest keyRequest : compRequest.getRequestsList()) { KeyedGetDataResponse.Builder keyBuilder = compBuilder .addDataBuilder() .setKey(keyRequest.getKey()) .setShardingKey(keyRequest.getShardingKey()); keyBuilder.addAllValues(keyRequest.getValuesToFetchList()); keyBuilder.addAllBags(keyRequest.getBagsToFetchList()); keyBuilder.addAllWatermarkHolds(keyRequest.getWatermarkHoldsToFetchList()); } } return builder.build(); }; private final boolean streamingEngine; private final Supplier<Long> idGenerator = new Supplier<Long>() { private final AtomicLong idGenerator = new AtomicLong(1L); @Override public Long get() { return idGenerator.getAndIncrement(); } }; @Rule public BlockingFn blockingFn = new BlockingFn(); @Rule public TestRule restoreMDC = new RestoreDataflowLoggingMDC(); @Rule public ErrorCollector errorCollector = new ErrorCollector(); WorkUnitClient mockWorkUnitClient = mock(WorkUnitClient.class); HotKeyLogger hotKeyLogger = mock(HotKeyLogger.class); public StreamingDataflowWorkerTest(Boolean streamingEngine) { this.streamingEngine = streamingEngine; } @Parameterized.Parameters(name = "{index}: 
[streamingEngine={0}]") public static Iterable<Object[]> data() { return Arrays.asList(new Object[][] {{false}, {true}}); } private static CounterUpdate getCounter(Iterable<CounterUpdate> counters, String name) { for (CounterUpdate counter : counters) { if (counter.getNameAndKind().getName().equals(name)) { return counter; } } return null; } static Work createMockWork(long workToken) { return Work.create( Windmill.WorkItem.newBuilder().setKey(ByteString.EMPTY).setWorkToken(workToken).build(), Instant::now, Collections.emptyList(), work -> {}); } static Work createMockWork(long workToken, Consumer<Work> processWorkFn) { return Work.create( Windmill.WorkItem.newBuilder().setKey(ByteString.EMPTY).setWorkToken(workToken).build(), Instant::now, Collections.emptyList(), processWorkFn); } private byte[] intervalWindowBytes(IntervalWindow window) throws Exception { return CoderUtils.encodeToByteArray( DEFAULT_WINDOW_COLLECTION_CODER, Collections.singletonList(window)); } private String keyStringForIndex(int index) { return DEFAULT_KEY_STRING + index; } private String dataStringForIndex(long index) { return DEFAULT_DATA_STRING + index; } private ParallelInstruction makeWindowingSourceInstruction(Coder<?> coder) { CloudObject timerCloudObject = CloudObject.forClassName( "com.google.cloud.dataflow.sdk.util.TimerOrElement$TimerOrElementCoder"); List<CloudObject> component = Collections.singletonList(CloudObjects.asCloudObject(coder, /* sdkComponents= */ null)); Structs.addList(timerCloudObject, PropertyNames.COMPONENT_ENCODINGS, component); CloudObject encodedCoder = CloudObject.forClassName("kind:windowed_value"); Structs.addBoolean(encodedCoder, PropertyNames.IS_WRAPPER, true); Structs.addList( encodedCoder, PropertyNames.COMPONENT_ENCODINGS, ImmutableList.of( timerCloudObject, CloudObjects.asCloudObject(IntervalWindowCoder.of(), /* sdkComponents= */ null))); return new ParallelInstruction() .setSystemName(DEFAULT_SOURCE_SYSTEM_NAME) .setOriginalName(DEFAULT_SOURCE_ORIGINAL_NAME) .setRead( new ReadInstruction() .setSource( new Source() .setSpec(CloudObject.forClass(WindowingWindmillReader.class)) .setCodec(encodedCoder))) .setOutputs( Collections.singletonList( new InstructionOutput() .setName(Long.toString(idGenerator.get())) .setCodec(encodedCoder) .setOriginalName(DEFAULT_OUTPUT_ORIGINAL_NAME) .setSystemName(DEFAULT_OUTPUT_SYSTEM_NAME))); } private ParallelInstruction makeSourceInstruction(Coder<?> coder) { return new ParallelInstruction() .setSystemName(DEFAULT_SOURCE_SYSTEM_NAME) .setOriginalName(DEFAULT_SOURCE_ORIGINAL_NAME) .setRead( new ReadInstruction() .setSource( new Source() .setSpec(CloudObject.forClass(UngroupedWindmillReader.class)) .setCodec( CloudObjects.asCloudObject( WindowedValue.getFullCoder(coder, IntervalWindow.getCoder()), /* sdkComponents= */ null)))) .setOutputs( Collections.singletonList( new InstructionOutput() .setName(Long.toString(idGenerator.get())) .setOriginalName(DEFAULT_OUTPUT_ORIGINAL_NAME) .setSystemName(DEFAULT_OUTPUT_SYSTEM_NAME) .setCodec( CloudObjects.asCloudObject( WindowedValue.getFullCoder(coder, IntervalWindow.getCoder()), /* sdkComponents= */ null)))); } private ParallelInstruction makeDoFnInstruction( DoFn<?, ?> doFn, int producerIndex, Coder<?> outputCoder, WindowingStrategy<?, ?> windowingStrategy) { CloudObject spec = CloudObject.forClassName("DoFn"); addString( spec, PropertyNames.SERIALIZED_FN, StringUtils.byteArrayToJsonString( SerializableUtils.serializeToByteArray( DoFnInfo.forFn( doFn, windowingStrategy /* windowing strategy */, null /* side 
input views */, null /* input coder */, new TupleTag<>(PropertyNames.OUTPUT) /* main output id */, DoFnSchemaInformation.create(), Collections.emptyMap())))); return new ParallelInstruction() .setSystemName(DEFAULT_PARDO_SYSTEM_NAME) .setName(DEFAULT_PARDO_USER_NAME) .setOriginalName(DEFAULT_PARDO_ORIGINAL_NAME) .setParDo( new ParDoInstruction() .setInput( new InstructionInput() .setProducerInstructionIndex(producerIndex) .setOutputNum(0)) .setNumOutputs(1) .setUserFn(spec) .setMultiOutputInfos( Collections.singletonList(new MultiOutputInfo().setTag(PropertyNames.OUTPUT)))) .setOutputs( Collections.singletonList( new InstructionOutput() .setName(PropertyNames.OUTPUT) .setOriginalName(DEFAULT_OUTPUT_ORIGINAL_NAME) .setSystemName(DEFAULT_OUTPUT_SYSTEM_NAME) .setCodec( CloudObjects.asCloudObject( WindowedValue.getFullCoder( outputCoder, windowingStrategy.getWindowFn().windowCoder()), /* sdkComponents= */ null)))); } private ParallelInstruction makeDoFnInstruction( DoFn<?, ?> doFn, int producerIndex, Coder<?> outputCoder) { WindowingStrategy<?, ?> windowingStrategy = WindowingStrategy.of(FixedWindows.of(Duration.millis(10))); return makeDoFnInstruction(doFn, producerIndex, outputCoder, windowingStrategy); } private ParallelInstruction makeSinkInstruction( String streamId, Coder<?> coder, int producerIndex, Coder<? extends BoundedWindow> windowCoder) { CloudObject spec = CloudObject.forClass(WindmillSink.class); addString(spec, "stream_id", streamId); return new ParallelInstruction() .setSystemName(DEFAULT_SINK_SYSTEM_NAME) .setOriginalName(DEFAULT_SINK_ORIGINAL_NAME) .setWrite( new WriteInstruction() .setInput( new InstructionInput() .setProducerInstructionIndex(producerIndex) .setOutputNum(0)) .setSink( new Sink() .setSpec(spec) .setCodec( CloudObjects.asCloudObject( WindowedValue.getFullCoder(coder, windowCoder), /* sdkComponents= */ null)))); } private ParallelInstruction makeSinkInstruction( Coder<?> coder, int producerIndex, Coder<? extends BoundedWindow> windowCoder) { return makeSinkInstruction(DEFAULT_DESTINATION_STREAM_ID, coder, producerIndex, windowCoder); } private ParallelInstruction makeSinkInstruction(Coder<?> coder, int producerIndex) { return makeSinkInstruction(coder, producerIndex, IntervalWindow.getCoder()); } /** * Returns a {@link MapTask} with the provided {@code instructions} and default values everywhere * else. 
*/ private MapTask defaultMapTask(List<ParallelInstruction> instructions) { MapTask mapTask = new MapTask() .setStageName(DEFAULT_MAP_STAGE_NAME) .setSystemName(DEFAULT_MAP_SYSTEM_NAME) .setInstructions(instructions); mapTask.setFactory(Transport.getJsonFactory()); return mapTask; } private Windmill.GetWorkResponse buildInput(String input, byte[] metadata) throws Exception { Windmill.GetWorkResponse.Builder builder = Windmill.GetWorkResponse.newBuilder(); TextFormat.merge(input, builder); if (metadata != null) { Windmill.InputMessageBundle.Builder messageBundleBuilder = builder.getWorkBuilder(0).getWorkBuilder(0).getMessageBundlesBuilder(0); for (Windmill.Message.Builder messageBuilder : messageBundleBuilder.getMessagesBuilderList()) { messageBuilder.setMetadata(addPaneTag(PaneInfo.NO_FIRING, metadata)); } } return builder.build(); } private Windmill.GetWorkResponse buildSessionInput( int workToken, long inputWatermark, long outputWatermark, List<Long> inputs, List<Timer> timers) throws Exception { Windmill.WorkItem.Builder builder = Windmill.WorkItem.newBuilder(); builder.setKey(DEFAULT_KEY_BYTES); builder.setShardingKey(DEFAULT_SHARDING_KEY); builder.setCacheToken(1); builder.setWorkToken(workToken); builder.setOutputDataWatermark(outputWatermark * 1000); if (!inputs.isEmpty()) { InputMessageBundle.Builder messageBuilder = Windmill.InputMessageBundle.newBuilder() .setSourceComputationId(DEFAULT_SOURCE_COMPUTATION_ID); for (Long input : inputs) { messageBuilder.addMessages( Windmill.Message.newBuilder() .setTimestamp(input) .setData(ByteString.copyFromUtf8(dataStringForIndex(input))) .setMetadata( addPaneTag( PaneInfo.NO_FIRING, intervalWindowBytes( new IntervalWindow( new Instant(input), new Instant(input).plus(Duration.millis(10))))))); } builder.addMessageBundles(messageBuilder); } if (!timers.isEmpty()) { builder.setTimers(Windmill.TimerBundle.newBuilder().addAllTimers(timers)); } return Windmill.GetWorkResponse.newBuilder() .addWork( Windmill.ComputationWorkItems.newBuilder() .setComputationId(DEFAULT_COMPUTATION_ID) .setInputDataWatermark(inputWatermark * 1000) .addWork(builder)) .build(); } private Windmill.GetWorkResponse makeInput(int index, long timestamp) throws Exception { return makeInput(index, timestamp, keyStringForIndex(index), DEFAULT_SHARDING_KEY); } private Windmill.GetWorkResponse makeInput( int index, long timestamp, String key, long shardingKey) throws Exception { return buildInput( "work {" + " computation_id: \"" + DEFAULT_COMPUTATION_ID + "\"" + " input_data_watermark: 0" + " work {" + " key: \"" + key + "\"" + " sharding_key: " + shardingKey + " work_token: " + index + " cache_token: 3" + " hot_key_info {" + " hot_key_age_usec: 1000000" + " }" + " message_bundles {" + " source_computation_id: \"" + DEFAULT_SOURCE_COMPUTATION_ID + "\"" + " messages {" + " timestamp: " + timestamp + " data: \"data" + index + "\"" + " }" + " }" + " }" + "}", CoderUtils.encodeToByteArray( CollectionCoder.of(IntervalWindow.getCoder()), Collections.singletonList(DEFAULT_WINDOW))); } /** * Returns a {@link org.apache.beam.runners.dataflow.windmill.Windmill.WorkItemCommitRequest} * builder parsed from the provided text format proto. */ private WorkItemCommitRequest.Builder parseCommitRequest(String output) throws Exception { WorkItemCommitRequest.Builder builder = Windmill.WorkItemCommitRequest.newBuilder(); TextFormat.merge(output, builder); return builder; } /** Sets the metadata of all the contained messages in this WorkItemCommitRequest. 
*/ private WorkItemCommitRequest.Builder setMessagesMetadata( PaneInfo pane, byte[] windowBytes, WorkItemCommitRequest.Builder builder) throws Exception { if (windowBytes != null) { KeyedMessageBundle.Builder bundles = builder.getOutputMessagesBuilder(0).getBundlesBuilder(0); for (int i = 0; i < bundles.getMessagesCount(); i++) { bundles.getMessagesBuilder(i).setMetadata(addPaneTag(pane, windowBytes)); } } return builder; } /** Reset value update timestamps to zero. */ private WorkItemCommitRequest.Builder setValuesTimestamps(WorkItemCommitRequest.Builder builder) { for (int i = 0; i < builder.getValueUpdatesCount(); i++) { builder.getValueUpdatesBuilder(i).getValueBuilder().setTimestamp(0); } return builder; } private WorkItemCommitRequest.Builder makeExpectedOutput(int index, long timestamp) throws Exception { return makeExpectedOutput( index, timestamp, keyStringForIndex(index), DEFAULT_SHARDING_KEY, keyStringForIndex(index)); } private WorkItemCommitRequest.Builder makeExpectedOutput( int index, long timestamp, String key, long shardingKey, String outKey) throws Exception { StringBuilder expectedCommitRequestBuilder = initializeExpectedCommitRequest(key, shardingKey, index); appendCommitOutputMessages(expectedCommitRequestBuilder, index, timestamp, outKey); return setMessagesMetadata( PaneInfo.NO_FIRING, intervalWindowBytes(DEFAULT_WINDOW), parseCommitRequest(expectedCommitRequestBuilder.toString())); } private WorkItemCommitRequest removeDynamicFields(WorkItemCommitRequest request) { return request.toBuilder().clearPerWorkItemLatencyAttributions().build(); } private WorkItemCommitRequest.Builder makeExpectedTruncationRequestOutput( int index, String key, long shardingKey, long estimatedSize) throws Exception { StringBuilder expectedCommitRequestBuilder = initializeExpectedCommitRequest(key, shardingKey, index, false); appendCommitTruncationFields(expectedCommitRequestBuilder, estimatedSize); return parseCommitRequest(expectedCommitRequestBuilder.toString()); } private StringBuilder initializeExpectedCommitRequest( String key, long shardingKey, int index, Boolean hasSourceBytesProcessed) { StringBuilder requestBuilder = new StringBuilder(); requestBuilder.append("key: \""); requestBuilder.append(key); requestBuilder.append("\" "); requestBuilder.append("sharding_key: "); requestBuilder.append(shardingKey); requestBuilder.append(" "); requestBuilder.append("work_token: "); requestBuilder.append(index); requestBuilder.append(" "); requestBuilder.append("cache_token: 3 "); if (hasSourceBytesProcessed) requestBuilder.append("source_bytes_processed: 0 "); return requestBuilder; } private StringBuilder initializeExpectedCommitRequest(String key, long shardingKey, int index) { return initializeExpectedCommitRequest(key, shardingKey, index, true); } private StringBuilder appendCommitOutputMessages( StringBuilder requestBuilder, int index, long timestamp, String outKey) { requestBuilder.append("output_messages {"); requestBuilder.append(" destination_stream_id: \""); requestBuilder.append(DEFAULT_DESTINATION_STREAM_ID); requestBuilder.append("\""); requestBuilder.append(" bundles {"); requestBuilder.append(" key: \""); requestBuilder.append(outKey); requestBuilder.append("\""); requestBuilder.append(" messages {"); requestBuilder.append(" timestamp: "); requestBuilder.append(timestamp); requestBuilder.append(" data: \""); requestBuilder.append(dataStringForIndex(index)); requestBuilder.append("\""); requestBuilder.append(" metadata: \"\""); requestBuilder.append(" }"); requestBuilder.append(" 
messages_ids: \"\""); requestBuilder.append(" }"); requestBuilder.append("}"); return requestBuilder; } private StringBuilder appendCommitTruncationFields( StringBuilder requestBuilder, long estimatedSize) { requestBuilder.append("exceeds_max_work_item_commit_bytes: true "); requestBuilder.append("estimated_work_item_commit_bytes: "); requestBuilder.append(estimatedSize); return requestBuilder; } private StreamingComputationConfig makeDefaultStreamingComputationConfig( List<ParallelInstruction> instructions) { StreamingComputationConfig config = new StreamingComputationConfig(); config.setComputationId(DEFAULT_COMPUTATION_ID); config.setSystemName(DEFAULT_MAP_SYSTEM_NAME); config.setStageName(DEFAULT_MAP_STAGE_NAME); config.setInstructions(instructions); return config; } private ByteString addPaneTag(PaneInfo pane, byte[] windowBytes) throws IOException { ByteStringOutputStream output = new ByteStringOutputStream(); PaneInfo.PaneInfoCoder.INSTANCE.encode(pane, output, Context.OUTER); output.write(windowBytes); return output.toByteString(); } private StreamingDataflowWorkerOptions createTestingPipelineOptions( FakeWindmillServer server, String... args) { List<String> argsList = Lists.newArrayList(args); if (streamingEngine) { argsList.add("--experiments=enable_streaming_engine"); } StreamingDataflowWorkerOptions options = PipelineOptionsFactory.fromArgs(argsList.toArray(new String[0])) .as(StreamingDataflowWorkerOptions.class); options.setAppName("StreamingWorkerHarnessTest"); options.setJobId("test_job_id"); options.setStreaming(true); options.setWindmillServerStub(server); options.setActiveWorkRefreshPeriodMillis(0); return options; } private StreamingDataflowWorker makeWorker( List<ParallelInstruction> instructions, StreamingDataflowWorkerOptions options, boolean publishCounters, Supplier<Instant> clock, Function<String, ScheduledExecutorService> executorSupplier) throws Exception { StreamingDataflowWorker worker = new StreamingDataflowWorker( Collections.singletonList(defaultMapTask(instructions)), IntrinsicMapTaskExecutorFactory.defaultFactory(), mockWorkUnitClient, options, publishCounters, hotKeyLogger, clock, executorSupplier); worker.addStateNameMappings( ImmutableMap.of(DEFAULT_PARDO_USER_NAME, DEFAULT_PARDO_STATE_FAMILY)); return worker; } private StreamingDataflowWorker makeWorker( List<ParallelInstruction> instructions, StreamingDataflowWorkerOptions options, boolean publishCounters) throws Exception { return makeWorker( instructions, options, publishCounters, Instant::now, (threadName) -> Executors.newSingleThreadScheduledExecutor()); } @Test public void testBasicHarness() throws Exception { List<ParallelInstruction> instructions = Arrays.asList( makeSourceInstruction(StringUtf8Coder.of()), makeSinkInstruction(StringUtf8Coder.of(), 0)); FakeWindmillServer server = new FakeWindmillServer(errorCollector); StreamingDataflowWorkerOptions options = createTestingPipelineOptions(server); StreamingDataflowWorker worker = makeWorker(instructions, options, true /* publishCounters */); worker.start(); final int numIters = 2000; for (int i = 0; i < numIters; ++i) { server.whenGetWorkCalled().thenReturn(makeInput(i, TimeUnit.MILLISECONDS.toMicros(i))); } Map<Long, Windmill.WorkItemCommitRequest> result = server.waitForAndGetCommits(numIters); worker.stop(); for (int i = 0; i < numIters; ++i) { assertTrue(result.containsKey((long) i)); assertEquals( makeExpectedOutput(i, TimeUnit.MILLISECONDS.toMicros(i)).build(), removeDynamicFields(result.get((long) i))); } verify(hotKeyLogger, 
atLeastOnce()).logHotKeyDetection(nullable(String.class), any()); } @Test public void testBasic() throws Exception { List<ParallelInstruction> instructions = Arrays.asList( makeSourceInstruction(StringUtf8Coder.of()), makeSinkInstruction(StringUtf8Coder.of(), 0)); FakeWindmillServer server = new FakeWindmillServer(errorCollector); server.setIsReady(false); StreamingConfigTask streamingConfig = new StreamingConfigTask(); streamingConfig.setStreamingComputationConfigs( ImmutableList.of(makeDefaultStreamingComputationConfig(instructions))); streamingConfig.setWindmillServiceEndpoint("foo"); WorkItem workItem = new WorkItem(); workItem.setStreamingConfigTask(streamingConfig); when(mockWorkUnitClient.getGlobalStreamingConfigWorkItem()).thenReturn(Optional.of(workItem)); StreamingDataflowWorkerOptions options = createTestingPipelineOptions(server); StreamingDataflowWorker worker = makeWorker(instructions, options, true /* publishCounters */); worker.start(); final int numIters = 2000; for (int i = 0; i < numIters; ++i) { server.whenGetWorkCalled().thenReturn(makeInput(i, TimeUnit.MILLISECONDS.toMicros(i))); } Map<Long, Windmill.WorkItemCommitRequest> result = server.waitForAndGetCommits(numIters); worker.stop(); for (int i = 0; i < numIters; ++i) { assertTrue(result.containsKey((long) i)); assertEquals( makeExpectedOutput(i, TimeUnit.MILLISECONDS.toMicros(i)).build(), removeDynamicFields(result.get((long) i))); } verify(hotKeyLogger, atLeastOnce()).logHotKeyDetection(nullable(String.class), any()); } @Test public void testHotKeyLogging() throws Exception { List<ParallelInstruction> instructions = Arrays.asList( makeSourceInstruction(KvCoder.of(StringUtf8Coder.of(), StringUtf8Coder.of())), makeSinkInstruction(KvCoder.of(StringUtf8Coder.of(), StringUtf8Coder.of()), 0)); FakeWindmillServer server = new FakeWindmillServer(errorCollector); server.setIsReady(false); StreamingConfigTask streamingConfig = new StreamingConfigTask(); streamingConfig.setStreamingComputationConfigs( ImmutableList.of(makeDefaultStreamingComputationConfig(instructions))); streamingConfig.setWindmillServiceEndpoint("foo"); WorkItem workItem = new WorkItem(); workItem.setStreamingConfigTask(streamingConfig); when(mockWorkUnitClient.getGlobalStreamingConfigWorkItem()).thenReturn(Optional.of(workItem)); StreamingDataflowWorkerOptions options = createTestingPipelineOptions(server, "--hotKeyLoggingEnabled=true"); StreamingDataflowWorker worker = makeWorker(instructions, options, true /* publishCounters */); worker.start(); final int numIters = 2000; for (int i = 0; i < numIters; ++i) { server .whenGetWorkCalled() .thenReturn(makeInput(i, TimeUnit.MILLISECONDS.toMicros(i), "key", DEFAULT_SHARDING_KEY)); } server.waitForAndGetCommits(numIters); worker.stop(); verify(hotKeyLogger, atLeastOnce()) .logHotKeyDetection(nullable(String.class), any(), eq("key")); } @Test public void testHotKeyLoggingNotEnabled() throws Exception { List<ParallelInstruction> instructions = Arrays.asList( makeSourceInstruction(KvCoder.of(StringUtf8Coder.of(), StringUtf8Coder.of())), makeSinkInstruction(KvCoder.of(StringUtf8Coder.of(), StringUtf8Coder.of()), 0)); FakeWindmillServer server = new FakeWindmillServer(errorCollector); server.setIsReady(false); StreamingConfigTask streamingConfig = new StreamingConfigTask(); streamingConfig.setStreamingComputationConfigs( ImmutableList.of(makeDefaultStreamingComputationConfig(instructions))); streamingConfig.setWindmillServiceEndpoint("foo"); WorkItem workItem = new WorkItem(); 
workItem.setStreamingConfigTask(streamingConfig); when(mockWorkUnitClient.getGlobalStreamingConfigWorkItem()).thenReturn(Optional.of(workItem)); StreamingDataflowWorkerOptions options = createTestingPipelineOptions(server); StreamingDataflowWorker worker = makeWorker(instructions, options, true /* publishCounters */); worker.start(); final int numIters = 2000; for (int i = 0; i < numIters; ++i) { server .whenGetWorkCalled() .thenReturn(makeInput(i, TimeUnit.MILLISECONDS.toMicros(i), "key", DEFAULT_SHARDING_KEY)); } server.waitForAndGetCommits(numIters); worker.stop(); verify(hotKeyLogger, atLeastOnce()).logHotKeyDetection(nullable(String.class), any()); } @Test public void testIgnoreRetriedKeys() throws Exception { final int numIters = 4; List<ParallelInstruction> instructions = Arrays.asList( makeSourceInstruction(StringUtf8Coder.of()), makeDoFnInstruction(blockingFn, 0, StringUtf8Coder.of()), makeSinkInstruction(StringUtf8Coder.of(), 0)); FakeWindmillServer server = new FakeWindmillServer(errorCollector); StreamingDataflowWorkerOptions options = createTestingPipelineOptions(server); StreamingDataflowWorker worker = makeWorker(instructions, options, true /* publishCounters */); worker.start(); for (int i = 0; i < numIters; ++i) { server .whenGetWorkCalled() .thenReturn( makeInput( i, TimeUnit.MILLISECONDS.toMicros(i), keyStringForIndex(i), DEFAULT_SHARDING_KEY)) .thenReturn( makeInput( i + 1000, TimeUnit.MILLISECONDS.toMicros(i), keyStringForIndex(i), DEFAULT_SHARDING_KEY + 1)); } BlockingFn.counter.acquire(numIters * 2); for (int i = 0; i < numIters; ++i) { server .whenGetWorkCalled() .thenReturn(makeInput(i, TimeUnit.MILLISECONDS.toMicros(i))) .thenReturn( makeInput( i + 1000, TimeUnit.MILLISECONDS.toMicros(i), keyStringForIndex(i), DEFAULT_SHARDING_KEY + 1)); } server.waitForEmptyWorkQueue(); for (int i = 0; i < numIters; ++i) { server .whenGetWorkCalled() .thenReturn( makeInput( i + numIters, TimeUnit.MILLISECONDS.toMicros(i), keyStringForIndex(i), DEFAULT_SHARDING_KEY)); } server.waitForEmptyWorkQueue(); BlockingFn.blocker.countDown(); Map<Long, Windmill.WorkItemCommitRequest> result = server.waitForAndGetCommits(numIters * 3); for (int i = 0; i < numIters; ++i) { assertTrue(result.containsKey((long) i)); assertEquals( makeExpectedOutput(i, TimeUnit.MILLISECONDS.toMicros(i)).build(), removeDynamicFields(result.get((long) i))); assertTrue(result.containsKey((long) i + 1000)); assertEquals( makeExpectedOutput( i + 1000, TimeUnit.MILLISECONDS.toMicros(i), keyStringForIndex(i), DEFAULT_SHARDING_KEY + 1, keyStringForIndex(i)) .build(), removeDynamicFields(result.get((long) i + 1000))); assertTrue(result.containsKey((long) i + numIters)); assertEquals( makeExpectedOutput( i + numIters, TimeUnit.MILLISECONDS.toMicros(i), keyStringForIndex(i), DEFAULT_SHARDING_KEY, keyStringForIndex(i)) .build(), removeDynamicFields(result.get((long) i + numIters))); } for (int i = 0; i < numIters; ++i) { server .whenGetWorkCalled() .thenReturn( makeInput( i + numIters * 2, TimeUnit.MILLISECONDS.toMicros(i), keyStringForIndex(i), DEFAULT_SHARDING_KEY)); } result = server.waitForAndGetCommits(numIters); worker.stop(); for (int i = 0; i < numIters; ++i) { assertTrue(result.containsKey((long) i + numIters * 2)); assertEquals( makeExpectedOutput( i + numIters * 2, TimeUnit.MILLISECONDS.toMicros(i), keyStringForIndex(i), DEFAULT_SHARDING_KEY, keyStringForIndex(i)) .build(), removeDynamicFields(result.get((long) i + numIters * 2))); } } @Test(timeout = 10000) public void testNumberOfWorkerHarnessThreadsIsHonored() 
throws Exception { int expectedNumberOfThreads = 5; List<ParallelInstruction> instructions = Arrays.asList( makeSourceInstruction(StringUtf8Coder.of()), makeDoFnInstruction(blockingFn, 0, StringUtf8Coder.of()), makeSinkInstruction(StringUtf8Coder.of(), 0)); FakeWindmillServer server = new FakeWindmillServer(errorCollector); StreamingDataflowWorkerOptions options = createTestingPipelineOptions(server); options.setNumberOfWorkerHarnessThreads(expectedNumberOfThreads); StreamingDataflowWorker worker = makeWorker(instructions, options, true /* publishCounters */); worker.start(); for (int i = 0; i < expectedNumberOfThreads * 2; ++i) { server.whenGetWorkCalled().thenReturn(makeInput(i, TimeUnit.MILLISECONDS.toMicros(i))); } BlockingFn.counter.acquire(expectedNumberOfThreads); if (BlockingFn.counter.tryAcquire(500, TimeUnit.MILLISECONDS)) { fail( "Expected number of threads " + expectedNumberOfThreads + " does not match actual " + "number of work items processed concurrently " + BlockingFn.callCounter.get() + "."); } BlockingFn.blocker.countDown(); } @Test public void testKeyTokenInvalidException() throws Exception { if (streamingEngine) { return; } KvCoder<String, String> kvCoder = KvCoder.of(StringUtf8Coder.of(), StringUtf8Coder.of()); List<ParallelInstruction> instructions = Arrays.asList( makeSourceInstruction(kvCoder), makeDoFnInstruction(new KeyTokenInvalidFn(), 0, kvCoder), makeSinkInstruction(kvCoder, 1)); FakeWindmillServer server = new FakeWindmillServer(errorCollector); server .whenGetWorkCalled() .thenReturn(makeInput(0, 0, DEFAULT_KEY_STRING, DEFAULT_SHARDING_KEY)); StreamingDataflowWorker worker = makeWorker(instructions, createTestingPipelineOptions(server), true /* publishCounters */); worker.start(); server.waitForEmptyWorkQueue(); server .whenGetWorkCalled() .thenReturn(makeInput(1, 0, DEFAULT_KEY_STRING, DEFAULT_SHARDING_KEY)); Map<Long, Windmill.WorkItemCommitRequest> result = server.waitForAndGetCommits(1); assertEquals( makeExpectedOutput(1, 0, DEFAULT_KEY_STRING, DEFAULT_SHARDING_KEY, DEFAULT_KEY_STRING) .build(), removeDynamicFields(result.get(1L))); assertEquals(1, result.size()); } @Test public void testKeyCommitTooLargeException() throws Exception { KvCoder<String, String> kvCoder = KvCoder.of(StringUtf8Coder.of(), StringUtf8Coder.of()); List<ParallelInstruction> instructions = Arrays.asList( makeSourceInstruction(kvCoder), makeDoFnInstruction(new LargeCommitFn(), 0, kvCoder), makeSinkInstruction(kvCoder, 1)); FakeWindmillServer server = new FakeWindmillServer(errorCollector); server.setExpectedExceptionCount(1); StreamingDataflowWorker worker = makeWorker(instructions, createTestingPipelineOptions(server), true /* publishCounters */); worker.setMaxWorkItemCommitBytes(1000); worker.start(); server .whenGetWorkCalled() .thenReturn(makeInput(1, 0, "large_key", DEFAULT_SHARDING_KEY)) .thenReturn(makeInput(2, 0, "key", DEFAULT_SHARDING_KEY)); server.waitForEmptyWorkQueue(); Map<Long, Windmill.WorkItemCommitRequest> result = server.waitForAndGetCommits(1); assertEquals(2, result.size()); assertEquals( makeExpectedOutput(2, 0, "key", DEFAULT_SHARDING_KEY, "key").build(), removeDynamicFields(result.get(2L))); assertTrue(result.containsKey(1L)); WorkItemCommitRequest largeCommit = result.get(1L); assertEquals("large_key", largeCommit.getKey().toStringUtf8()); assertEquals( makeExpectedTruncationRequestOutput( 1, "large_key", DEFAULT_SHARDING_KEY, largeCommit.getEstimatedWorkItemCommitBytes()) .build(), largeCommit); assertTrue(largeCommit.getEstimatedWorkItemCommitBytes() > 
1000); int maxTries = 10; while (--maxTries > 0) { worker.reportPeriodicWorkerUpdates(); Uninterruptibles.sleepUninterruptibly(1000, TimeUnit.MILLISECONDS); } ArgumentCaptor<WorkItemStatus> workItemStatusCaptor = ArgumentCaptor.forClass(WorkItemStatus.class); verify(mockWorkUnitClient, atLeast(2)).reportWorkItemStatus(workItemStatusCaptor.capture()); List<WorkItemStatus> capturedStatuses = workItemStatusCaptor.getAllValues(); boolean foundErrors = false; for (WorkItemStatus status : capturedStatuses) { if (!status.getErrors().isEmpty()) { assertFalse(foundErrors); foundErrors = true; String errorMessage = status.getErrors().get(0).getMessage(); assertThat(errorMessage, Matchers.containsString("KeyCommitTooLargeException")); } } assertTrue(foundErrors); } @Test public void testKeyChange() throws Exception { KvCoder<String, String> kvCoder = KvCoder.of(StringUtf8Coder.of(), StringUtf8Coder.of()); List<ParallelInstruction> instructions = Arrays.asList( makeSourceInstruction(kvCoder), makeDoFnInstruction(new ChangeKeysFn(), 0, kvCoder), makeSinkInstruction(kvCoder, 1)); FakeWindmillServer server = new FakeWindmillServer(errorCollector); for (int i = 0; i < 2; i++) { server .whenGetWorkCalled() .thenReturn( makeInput( i, TimeUnit.MILLISECONDS.toMicros(i), keyStringForIndex(i), DEFAULT_SHARDING_KEY)) .thenReturn( makeInput( i + 1000, TimeUnit.MILLISECONDS.toMicros(i), keyStringForIndex(i), DEFAULT_SHARDING_KEY + i)); } StreamingDataflowWorker worker = makeWorker(instructions, createTestingPipelineOptions(server), true /* publishCounters */); worker.start(); Map<Long, Windmill.WorkItemCommitRequest> result = server.waitForAndGetCommits(4); for (int i = 0; i < 2; i++) { assertTrue(result.containsKey((long) i)); assertEquals( makeExpectedOutput( i, TimeUnit.MILLISECONDS.toMicros(i), keyStringForIndex(i), DEFAULT_SHARDING_KEY, keyStringForIndex(i) + "_data" + i) .build(), removeDynamicFields(result.get((long) i))); assertTrue(result.containsKey((long) i + 1000)); assertEquals( makeExpectedOutput( i + 1000, TimeUnit.MILLISECONDS.toMicros(i), keyStringForIndex(i), DEFAULT_SHARDING_KEY + i, keyStringForIndex(i) + "_data" + (i + 1000)) .build(), removeDynamicFields(result.get((long) i + 1000))); } } @Test(timeout = 30000) public void testExceptions() throws Exception { if (streamingEngine) { return; } List<ParallelInstruction> instructions = Arrays.asList( makeSourceInstruction(StringUtf8Coder.of()), makeDoFnInstruction(new TestExceptionFn(), 0, StringUtf8Coder.of()), makeSinkInstruction(StringUtf8Coder.of(), 1)); FakeWindmillServer server = new FakeWindmillServer(errorCollector); server.setExpectedExceptionCount(1); String keyString = keyStringForIndex(0); server .whenGetWorkCalled() .thenReturn( buildInput( "work {" + " computation_id: \"" + DEFAULT_COMPUTATION_ID + "\"" + " input_data_watermark: 0" + " work {" + " key: \"" + keyString + "\"" + " sharding_key: 1" + " work_token: 0" + " cache_token: 1" + " message_bundles {" + " source_computation_id: \"" + DEFAULT_SOURCE_COMPUTATION_ID + "\"" + " messages {" + " timestamp: 0" + " data: \"0\"" + " }" + " }" + " }" + "}", CoderUtils.encodeToByteArray( CollectionCoder.of(IntervalWindow.getCoder()), Collections.singletonList(DEFAULT_WINDOW)))); StreamingDataflowWorker worker = makeWorker(instructions, createTestingPipelineOptions(server), true /* publishCounters */); worker.start(); server.waitForEmptyWorkQueue(); int maxTries = 10; while (maxTries-- > 0 && !worker.workExecutorIsEmpty()) { Uninterruptibles.sleepUninterruptibly(1000, 
TimeUnit.MILLISECONDS); } assertTrue(worker.workExecutorIsEmpty()); maxTries = 10; while (maxTries-- > 0) { worker.reportPeriodicWorkerUpdates(); Uninterruptibles.sleepUninterruptibly(1000, TimeUnit.MILLISECONDS); } ArgumentCaptor<WorkItemStatus> workItemStatusCaptor = ArgumentCaptor.forClass(WorkItemStatus.class); verify(mockWorkUnitClient, atLeast(1)).reportWorkItemStatus(workItemStatusCaptor.capture()); List<WorkItemStatus> capturedStatuses = workItemStatusCaptor.getAllValues(); boolean foundErrors = false; int lastUpdateWithoutErrors = 0; int lastUpdateWithErrors = 0; for (WorkItemStatus status : capturedStatuses) { if (status.getErrors().isEmpty()) { lastUpdateWithoutErrors++; continue; } lastUpdateWithErrors++; assertFalse(foundErrors); foundErrors = true; String stacktrace = status.getErrors().get(0).getMessage(); assertThat(stacktrace, Matchers.containsString("Exception!")); assertThat(stacktrace, Matchers.containsString("Another exception!")); assertThat(stacktrace, Matchers.containsString("processElement")); } assertTrue(foundErrors); assertTrue(lastUpdateWithoutErrors > lastUpdateWithErrors); assertThat(server.getStatsReceived().size(), Matchers.greaterThanOrEqualTo(1)); Windmill.ReportStatsRequest stats = server.getStatsReceived().get(0); assertEquals(DEFAULT_COMPUTATION_ID, stats.getComputationId()); assertEquals(keyString, stats.getKey().toStringUtf8()); assertEquals(0, stats.getWorkToken()); assertEquals(1, stats.getShardingKey()); } @Test public void testAssignWindows() throws Exception { Duration gapDuration = Duration.standardSeconds(1); CloudObject spec = CloudObject.forClassName("AssignWindowsDoFn"); SdkComponents sdkComponents = SdkComponents.create(); sdkComponents.registerEnvironment(Environments.JAVA_SDK_HARNESS_ENVIRONMENT); addString( spec, PropertyNames.SERIALIZED_FN, StringUtils.byteArrayToJsonString( WindowingStrategyTranslation.toMessageProto( WindowingStrategy.of(FixedWindows.of(gapDuration)), sdkComponents) .toByteArray())); ParallelInstruction addWindowsInstruction = new ParallelInstruction() .setSystemName("AssignWindows") .setName("AssignWindows") .setOriginalName("AssignWindowsOriginal") .setParDo( new ParDoInstruction() .setInput(new InstructionInput().setProducerInstructionIndex(0).setOutputNum(0)) .setNumOutputs(1) .setUserFn(spec)) .setOutputs( Collections.singletonList( new InstructionOutput() .setOriginalName(DEFAULT_OUTPUT_ORIGINAL_NAME) .setSystemName(DEFAULT_OUTPUT_SYSTEM_NAME) .setName("output") .setCodec( CloudObjects.asCloudObject( WindowedValue.getFullCoder( StringUtf8Coder.of(), IntervalWindow.getCoder()), /* sdkComponents= */ null)))); List<ParallelInstruction> instructions = Arrays.asList( makeSourceInstruction(StringUtf8Coder.of()), addWindowsInstruction, makeSinkInstruction(StringUtf8Coder.of(), 1)); FakeWindmillServer server = new FakeWindmillServer(errorCollector); int timestamp1 = 0; int timestamp2 = 1000000; server .whenGetWorkCalled() .thenReturn(makeInput(timestamp1, timestamp1)) .thenReturn(makeInput(timestamp2, timestamp2)); StreamingDataflowWorker worker = makeWorker(instructions, createTestingPipelineOptions(server), false /* publishCounters */); worker.start(); Map<Long, Windmill.WorkItemCommitRequest> result = server.waitForAndGetCommits(2); assertThat( removeDynamicFields(result.get((long) timestamp1)), equalTo( setMessagesMetadata( PaneInfo.NO_FIRING, intervalWindowBytes(WINDOW_AT_ZERO), makeExpectedOutput(timestamp1, timestamp1)) .build())); assertThat( removeDynamicFields(result.get((long) timestamp2)), equalTo( 
setMessagesMetadata( PaneInfo.NO_FIRING, intervalWindowBytes(WINDOW_AT_ONE_SECOND), makeExpectedOutput(timestamp2, timestamp2)) .build())); } private void verifyTimers(WorkItemCommitRequest commit, Timer... timers) { assertThat(commit.getOutputTimersList(), Matchers.containsInAnyOrder(timers)); } private void verifyHolds(WorkItemCommitRequest commit, WatermarkHold... watermarkHolds) { assertThat(commit.getWatermarkHoldsList(), Matchers.containsInAnyOrder(watermarkHolds)); } private Timer buildWatermarkTimer(String tagPrefix, long timestampMillis) { return buildWatermarkTimer(tagPrefix, timestampMillis, false); } private Timer buildWatermarkTimer(String tagPrefix, long timestampMillis, boolean delete) { Timer.Builder builder = Timer.newBuilder() .setTag(ByteString.copyFromUtf8(tagPrefix + ":" + timestampMillis)) .setType(Type.WATERMARK) .setStateFamily("MergeWindows"); if (!delete) { builder.setTimestamp(timestampMillis * 1000); builder.setMetadataTimestamp(timestampMillis * 1000); } return builder.build(); } private WatermarkHold buildHold(String tag, long timestamp, boolean reset) { WatermarkHold.Builder builder = WatermarkHold.newBuilder() .setTag(ByteString.copyFromUtf8(tag)) .setStateFamily("MergeWindows"); if (reset) { builder.setReset(true); } if (timestamp >= 0) { builder.addTimestamps(timestamp * 1000); } return builder.build(); } @Test public void testMergeWindows() throws Exception { Coder<KV<String, String>> kvCoder = KvCoder.of(StringUtf8Coder.of(), StringUtf8Coder.of()); Coder<WindowedValue<KV<String, String>>> windowedKvCoder = FullWindowedValueCoder.of(kvCoder, IntervalWindow.getCoder()); KvCoder<String, List<String>> groupedCoder = KvCoder.of(StringUtf8Coder.of(), ListCoder.of(StringUtf8Coder.of())); Coder<WindowedValue<KV<String, List<String>>>> windowedGroupedCoder = FullWindowedValueCoder.of(groupedCoder, IntervalWindow.getCoder()); CloudObject spec = CloudObject.forClassName("MergeWindowsDoFn"); SdkComponents sdkComponents = SdkComponents.create(); sdkComponents.registerEnvironment(Environments.JAVA_SDK_HARNESS_ENVIRONMENT); addString( spec, PropertyNames.SERIALIZED_FN, StringUtils.byteArrayToJsonString( WindowingStrategyTranslation.toMessageProto( WindowingStrategy.of(FixedWindows.of(Duration.standardSeconds(1))) .withTimestampCombiner(TimestampCombiner.EARLIEST), sdkComponents) .toByteArray())); addObject( spec, WorkerPropertyNames.INPUT_CODER, CloudObjects.asCloudObject(windowedKvCoder, /* sdkComponents= */ null)); ParallelInstruction mergeWindowsInstruction = new ParallelInstruction() .setSystemName("MergeWindows-System") .setName("MergeWindowsStep") .setOriginalName("MergeWindowsOriginal") .setParDo( new ParDoInstruction() .setInput(new InstructionInput().setProducerInstructionIndex(0).setOutputNum(0)) .setNumOutputs(1) .setUserFn(spec)) .setOutputs( Collections.singletonList( new InstructionOutput() .setOriginalName(DEFAULT_OUTPUT_ORIGINAL_NAME) .setSystemName(DEFAULT_OUTPUT_SYSTEM_NAME) .setName("output") .setCodec( CloudObjects.asCloudObject( windowedGroupedCoder, /* sdkComponents= */ null)))); List<ParallelInstruction> instructions = Arrays.asList( makeWindowingSourceInstruction(kvCoder), mergeWindowsInstruction, makeSinkInstruction(groupedCoder, 1)); FakeWindmillServer server = new FakeWindmillServer(errorCollector); StreamingDataflowWorker worker = makeWorker(instructions, createTestingPipelineOptions(server), false /* publishCounters */); Map<String, String> nameMap = new HashMap<>(); nameMap.put("MergeWindowsStep", "MergeWindows"); 
worker.addStateNameMappings(nameMap); worker.start(); server .whenGetWorkCalled() .thenReturn( buildInput( "work {" + " computation_id: \"" + DEFAULT_COMPUTATION_ID + "\"" + " input_data_watermark: 0" + " work {" + " key: \"" + DEFAULT_KEY_STRING + "\"" + " sharding_key: " + DEFAULT_SHARDING_KEY + " cache_token: 1" + " work_token: 1" + " message_bundles {" + " source_computation_id: \"" + DEFAULT_SOURCE_COMPUTATION_ID + "\"" + " messages {" + " timestamp: 0" + " data: \"" + dataStringForIndex(0) + "\"" + " }" + " }" + " }" + "}", intervalWindowBytes(WINDOW_AT_ZERO))); Map<Long, Windmill.WorkItemCommitRequest> result = server.waitForAndGetCommits(1); Iterable<CounterUpdate> counters = worker.buildCounters(); String window = "/gAAAAAAAA-joBw/"; String timerTagPrefix = "/s" + window + "+0"; ByteString bufferTag = ByteString.copyFromUtf8(window + "+ubuf"); ByteString paneInfoTag = ByteString.copyFromUtf8(window + "+upane"); String watermarkDataHoldTag = window + "+uhold"; String watermarkExtraHoldTag = window + "+uextra"; String stateFamily = "MergeWindows"; ByteString bufferData = ByteString.copyFromUtf8("data0"); ByteString outputData = ByteString.copyFrom( new byte[] { (byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0xff, 0x01, 0x05, 0x64, 0x61, 0x74, 0x61, 0x30, 0x00 }); long timerTimestamp = 999000L; WorkItemCommitRequest actualOutput = result.get(1L); verifyTimers(actualOutput, buildWatermarkTimer(timerTagPrefix, 999)); assertThat( actualOutput.getBagUpdatesList(), Matchers.contains( Matchers.equalTo( Windmill.TagBag.newBuilder() .setTag(bufferTag) .setStateFamily(stateFamily) .addValues(bufferData) .build()))); verifyHolds(actualOutput, buildHold(watermarkDataHoldTag, 0, false)); assertEquals(0L, splitIntToLong(getCounter(counters, "WindmillStateBytesRead").getInteger())); assertEquals( Windmill.WorkItemCommitRequest.newBuilder(actualOutput) .clearCounterUpdates() .clearOutputMessages() .clearPerWorkItemLatencyAttributions() .build() .getSerializedSize(), splitIntToLong(getCounter(counters, "WindmillStateBytesWritten").getInteger())); assertEquals( VarInt.getLength(0L) + dataStringForIndex(0).length() + addPaneTag(PaneInfo.NO_FIRING, intervalWindowBytes(WINDOW_AT_ZERO)).size() + 5L , splitIntToLong(getCounter(counters, "WindmillShuffleBytesRead").getInteger())); Windmill.GetWorkResponse.Builder getWorkResponse = Windmill.GetWorkResponse.newBuilder(); getWorkResponse .addWorkBuilder() .setComputationId(DEFAULT_COMPUTATION_ID) .setInputDataWatermark(timerTimestamp + 1000) .addWorkBuilder() .setKey(ByteString.copyFromUtf8(DEFAULT_KEY_STRING)) .setShardingKey(DEFAULT_SHARDING_KEY) .setWorkToken(2) .setCacheToken(1) .getTimersBuilder() .addTimers(buildWatermarkTimer(timerTagPrefix, timerTimestamp)); server.whenGetWorkCalled().thenReturn(getWorkResponse.build()); long expectedBytesRead = 0L; Windmill.GetDataResponse.Builder dataResponse = Windmill.GetDataResponse.newBuilder(); Windmill.KeyedGetDataResponse.Builder dataBuilder = dataResponse .addDataBuilder() .setComputationId(DEFAULT_COMPUTATION_ID) .addDataBuilder() .setKey(ByteString.copyFromUtf8(DEFAULT_KEY_STRING)) .setShardingKey(DEFAULT_SHARDING_KEY); dataBuilder .addBagsBuilder() .setTag(bufferTag) .setStateFamily(stateFamily) .addValues(bufferData); dataBuilder .addWatermarkHoldsBuilder() .setTag(ByteString.copyFromUtf8(watermarkDataHoldTag)) .setStateFamily(stateFamily) .addTimestamps(0); dataBuilder .addWatermarkHoldsBuilder() .setTag(ByteString.copyFromUtf8(watermarkExtraHoldTag)) .setStateFamily(stateFamily) .addTimestamps(0); 
dataBuilder .addValuesBuilder() .setTag(paneInfoTag) .setStateFamily(stateFamily) .getValueBuilder() .setTimestamp(0) .setData(ByteString.EMPTY); server.whenGetDataCalled().thenReturn(dataResponse.build()); expectedBytesRead += dataBuilder.build().getSerializedSize(); result = server.waitForAndGetCommits(1); counters = worker.buildCounters(); actualOutput = result.get(2L); assertEquals(1, actualOutput.getOutputMessagesCount()); assertEquals( DEFAULT_DESTINATION_STREAM_ID, actualOutput.getOutputMessages(0).getDestinationStreamId()); assertEquals( DEFAULT_KEY_STRING, actualOutput.getOutputMessages(0).getBundles(0).getKey().toStringUtf8()); assertEquals(0, actualOutput.getOutputMessages(0).getBundles(0).getMessages(0).getTimestamp()); assertEquals( outputData, actualOutput.getOutputMessages(0).getBundles(0).getMessages(0).getData()); ByteString metadata = actualOutput.getOutputMessages(0).getBundles(0).getMessages(0).getMetadata(); InputStream inStream = metadata.newInput(); assertEquals( PaneInfo.createPane(true, true, Timing.ON_TIME), PaneInfoCoder.INSTANCE.decode(inStream)); assertEquals( Collections.singletonList(WINDOW_AT_ZERO), DEFAULT_WINDOW_COLLECTION_CODER.decode(inStream, Coder.Context.OUTER)); assertThat( "" + actualOutput.getValueUpdatesList(), actualOutput.getValueUpdatesList(), Matchers.contains( Matchers.equalTo( Windmill.TagValue.newBuilder() .setTag(paneInfoTag) .setStateFamily(stateFamily) .setValue( Windmill.Value.newBuilder() .setTimestamp(Long.MAX_VALUE) .setData(ByteString.EMPTY)) .build()))); assertThat( "" + actualOutput.getBagUpdatesList(), actualOutput.getBagUpdatesList(), Matchers.contains( Matchers.equalTo( Windmill.TagBag.newBuilder() .setTag(bufferTag) .setStateFamily(stateFamily) .setDeleteAll(true) .build()))); verifyHolds( actualOutput, buildHold(watermarkDataHoldTag, -1, true), buildHold(watermarkExtraHoldTag, -1, true)); assertEquals( expectedBytesRead, splitIntToLong(getCounter(counters, "WindmillStateBytesRead").getInteger())); assertEquals( Windmill.WorkItemCommitRequest.newBuilder(removeDynamicFields(actualOutput)) .clearCounterUpdates() .clearOutputMessages() .build() .getSerializedSize(), splitIntToLong(getCounter(counters, "WindmillStateBytesWritten").getInteger())); assertEquals(0L, splitIntToLong(getCounter(counters, "WindmillShuffleBytesRead").getInteger())); } @Test public void testMergeWindowsCaching() throws Exception { Coder<KV<String, String>> kvCoder = KvCoder.of(StringUtf8Coder.of(), StringUtf8Coder.of()); Coder<WindowedValue<KV<String, String>>> windowedKvCoder = FullWindowedValueCoder.of(kvCoder, IntervalWindow.getCoder()); KvCoder<String, List<String>> groupedCoder = KvCoder.of(StringUtf8Coder.of(), ListCoder.of(StringUtf8Coder.of())); Coder<WindowedValue<KV<String, List<String>>>> windowedGroupedCoder = FullWindowedValueCoder.of(groupedCoder, IntervalWindow.getCoder()); CloudObject spec = CloudObject.forClassName("MergeWindowsDoFn"); SdkComponents sdkComponents = SdkComponents.create(); sdkComponents.registerEnvironment(Environments.JAVA_SDK_HARNESS_ENVIRONMENT); addString( spec, PropertyNames.SERIALIZED_FN, StringUtils.byteArrayToJsonString( WindowingStrategyTranslation.toMessageProto( WindowingStrategy.of(FixedWindows.of(Duration.standardSeconds(1))) .withTimestampCombiner(TimestampCombiner.EARLIEST), sdkComponents) .toByteArray())); addObject( spec, WorkerPropertyNames.INPUT_CODER, CloudObjects.asCloudObject(windowedKvCoder, /* sdkComponents= */ null)); ParallelInstruction mergeWindowsInstruction = new ParallelInstruction() 
.setSystemName("MergeWindows-System") .setName("MergeWindowsStep") .setOriginalName("MergeWindowsOriginal") .setParDo( new ParDoInstruction() .setInput(new InstructionInput().setProducerInstructionIndex(0).setOutputNum(0)) .setNumOutputs(1) .setUserFn(spec)) .setOutputs( Collections.singletonList( new InstructionOutput() .setOriginalName(DEFAULT_OUTPUT_ORIGINAL_NAME) .setSystemName(DEFAULT_OUTPUT_SYSTEM_NAME) .setName("output") .setCodec( CloudObjects.asCloudObject( windowedGroupedCoder, /* sdkComponents= */ null)))); List<ParallelInstruction> instructions = Arrays.asList( makeWindowingSourceInstruction(kvCoder), mergeWindowsInstruction, makeDoFnInstruction(new PassthroughDoFn(), 1, groupedCoder), makeSinkInstruction(groupedCoder, 2)); FakeWindmillServer server = new FakeWindmillServer(errorCollector); StreamingDataflowWorker worker = makeWorker(instructions, createTestingPipelineOptions(server), false /* publishCounters */); Map<String, String> nameMap = new HashMap<>(); nameMap.put("MergeWindowsStep", "MergeWindows"); worker.addStateNameMappings(nameMap); worker.start(); server .whenGetWorkCalled() .thenReturn( buildInput( "work {" + " computation_id: \"" + DEFAULT_COMPUTATION_ID + "\"" + " input_data_watermark: 0" + " work {" + " key: \"" + DEFAULT_KEY_STRING + "\"" + " sharding_key: " + DEFAULT_SHARDING_KEY + " cache_token: 1" + " work_token: 1" + " is_new_key: 1" + " message_bundles {" + " source_computation_id: \"" + DEFAULT_SOURCE_COMPUTATION_ID + "\"" + " messages {" + " timestamp: 0" + " data: \"" + dataStringForIndex(0) + "\"" + " }" + " }" + " }" + "}", intervalWindowBytes(WINDOW_AT_ZERO))); Map<Long, Windmill.WorkItemCommitRequest> result = server.waitForAndGetCommits(1); Iterable<CounterUpdate> counters = worker.buildCounters(); String window = "/gAAAAAAAA-joBw/"; String timerTagPrefix = "/s" + window + "+0"; ByteString bufferTag = ByteString.copyFromUtf8(window + "+ubuf"); ByteString paneInfoTag = ByteString.copyFromUtf8(window + "+upane"); String watermarkDataHoldTag = window + "+uhold"; String watermarkExtraHoldTag = window + "+uextra"; String stateFamily = "MergeWindows"; ByteString bufferData = ByteString.copyFromUtf8("data0"); ByteString outputData = ByteString.copyFrom( new byte[] { (byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0xff, 0x01, 0x05, 0x64, 0x61, 0x74, 0x61, 0x30, 0x00 }); long timerTimestamp = 999000L; WorkItemCommitRequest actualOutput = result.get(1L); verifyTimers(actualOutput, buildWatermarkTimer(timerTagPrefix, 999)); assertThat( actualOutput.getBagUpdatesList(), Matchers.contains( Matchers.equalTo( Windmill.TagBag.newBuilder() .setTag(bufferTag) .setStateFamily(stateFamily) .addValues(bufferData) .build()))); verifyHolds(actualOutput, buildHold(watermarkDataHoldTag, 0, false)); assertEquals(0L, splitIntToLong(getCounter(counters, "WindmillStateBytesRead").getInteger())); assertEquals( Windmill.WorkItemCommitRequest.newBuilder(removeDynamicFields(actualOutput)) .clearCounterUpdates() .clearOutputMessages() .build() .getSerializedSize(), splitIntToLong(getCounter(counters, "WindmillStateBytesWritten").getInteger())); assertEquals( VarInt.getLength(0L) + dataStringForIndex(0).length() + addPaneTag(PaneInfo.NO_FIRING, intervalWindowBytes(WINDOW_AT_ZERO)).size() + 5L , splitIntToLong(getCounter(counters, "WindmillShuffleBytesRead").getInteger())); Windmill.GetWorkResponse.Builder getWorkResponse = Windmill.GetWorkResponse.newBuilder(); getWorkResponse .addWorkBuilder() .setComputationId(DEFAULT_COMPUTATION_ID) .setInputDataWatermark(timerTimestamp + 1000) 
.addWorkBuilder() .setKey(ByteString.copyFromUtf8(DEFAULT_KEY_STRING)) .setShardingKey(DEFAULT_SHARDING_KEY) .setWorkToken(2) .setCacheToken(1) .getTimersBuilder() .addTimers(buildWatermarkTimer(timerTagPrefix, timerTimestamp)); server.whenGetWorkCalled().thenReturn(getWorkResponse.build()); long expectedBytesRead = 0L; Windmill.GetDataResponse.Builder dataResponse = Windmill.GetDataResponse.newBuilder(); Windmill.KeyedGetDataResponse.Builder dataBuilder = dataResponse .addDataBuilder() .setComputationId(DEFAULT_COMPUTATION_ID) .addDataBuilder() .setKey(ByteString.copyFromUtf8(DEFAULT_KEY_STRING)) .setShardingKey(DEFAULT_SHARDING_KEY); dataBuilder .addWatermarkHoldsBuilder() .setTag(ByteString.copyFromUtf8(watermarkExtraHoldTag)) .setStateFamily(stateFamily) .addTimestamps(0); dataBuilder .addValuesBuilder() .setTag(paneInfoTag) .setStateFamily(stateFamily) .getValueBuilder() .setTimestamp(0) .setData(ByteString.EMPTY); server.whenGetDataCalled().thenReturn(dataResponse.build()); expectedBytesRead += dataBuilder.build().getSerializedSize(); result = server.waitForAndGetCommits(1); counters = worker.buildCounters(); actualOutput = result.get(2L); assertEquals(1, actualOutput.getOutputMessagesCount()); assertEquals( DEFAULT_DESTINATION_STREAM_ID, actualOutput.getOutputMessages(0).getDestinationStreamId()); assertEquals( DEFAULT_KEY_STRING, actualOutput.getOutputMessages(0).getBundles(0).getKey().toStringUtf8()); assertEquals(0, actualOutput.getOutputMessages(0).getBundles(0).getMessages(0).getTimestamp()); assertEquals( outputData, actualOutput.getOutputMessages(0).getBundles(0).getMessages(0).getData()); ByteString metadata = actualOutput.getOutputMessages(0).getBundles(0).getMessages(0).getMetadata(); InputStream inStream = metadata.newInput(); assertEquals( PaneInfo.createPane(true, true, Timing.ON_TIME), PaneInfoCoder.INSTANCE.decode(inStream)); assertEquals( Collections.singletonList(WINDOW_AT_ZERO), DEFAULT_WINDOW_COLLECTION_CODER.decode(inStream, Coder.Context.OUTER)); assertThat( "" + actualOutput.getValueUpdatesList(), actualOutput.getValueUpdatesList(), Matchers.contains( Matchers.equalTo( Windmill.TagValue.newBuilder() .setTag(paneInfoTag) .setStateFamily(stateFamily) .setValue( Windmill.Value.newBuilder() .setTimestamp(Long.MAX_VALUE) .setData(ByteString.EMPTY)) .build()))); assertThat( "" + actualOutput.getBagUpdatesList(), actualOutput.getBagUpdatesList(), Matchers.contains( Matchers.equalTo( Windmill.TagBag.newBuilder() .setTag(bufferTag) .setStateFamily(stateFamily) .setDeleteAll(true) .build()))); verifyHolds( actualOutput, buildHold(watermarkDataHoldTag, -1, true), buildHold(watermarkExtraHoldTag, -1, true)); assertEquals( expectedBytesRead, splitIntToLong(getCounter(counters, "WindmillStateBytesRead").getInteger())); assertEquals( Windmill.WorkItemCommitRequest.newBuilder(removeDynamicFields(actualOutput)) .clearCounterUpdates() .clearOutputMessages() .build() .getSerializedSize(), splitIntToLong(getCounter(counters, "WindmillStateBytesWritten").getInteger())); assertEquals(0L, splitIntToLong(getCounter(counters, "WindmillShuffleBytesRead").getInteger())); CacheStats stats = worker.stateCache.getCacheStats(); LOG.info("cache stats {}", stats); assertEquals(1, stats.hitCount()); assertEquals(4, stats.missCount()); } private void runMergeSessionsActions(List<Action> actions) throws Exception { Coder<KV<String, String>> kvCoder = KvCoder.of(StringUtf8Coder.of(), StringUtf8Coder.of()); Coder<WindowedValue<KV<String, String>>> windowedKvCoder = 
FullWindowedValueCoder.of(kvCoder, IntervalWindow.getCoder()); KvCoder<String, List<String>> groupedCoder = KvCoder.of(StringUtf8Coder.of(), ListCoder.of(StringUtf8Coder.of())); Coder<WindowedValue<KV<String, List<String>>>> windowedGroupedCoder = FullWindowedValueCoder.of(groupedCoder, IntervalWindow.getCoder()); CloudObject spec = CloudObject.forClassName("MergeWindowsDoFn"); SdkComponents sdkComponents = SdkComponents.create(); sdkComponents.registerEnvironment(Environments.JAVA_SDK_HARNESS_ENVIRONMENT); addString( spec, PropertyNames.SERIALIZED_FN, StringUtils.byteArrayToJsonString( WindowingStrategyTranslation.toMessageProto( WindowingStrategy.of(Sessions.withGapDuration(Duration.millis(10))) .withMode(AccumulationMode.DISCARDING_FIRED_PANES) .withTrigger( Repeatedly.forever( AfterWatermark.pastEndOfWindow() .withLateFirings(AfterPane.elementCountAtLeast(1)))) .withAllowedLateness(Duration.standardMinutes(60)), sdkComponents) .toByteArray())); addObject( spec, WorkerPropertyNames.INPUT_CODER, CloudObjects.asCloudObject(windowedKvCoder, /* sdkComponents= */ null)); ParallelInstruction mergeWindowsInstruction = new ParallelInstruction() .setSystemName("MergeWindows-System") .setName("MergeWindowsStep") .setOriginalName("MergeWindowsOriginal") .setParDo( new ParDoInstruction() .setInput(new InstructionInput().setProducerInstructionIndex(0).setOutputNum(0)) .setNumOutputs(1) .setUserFn(spec)) .setOutputs( Collections.singletonList( new InstructionOutput() .setOriginalName(DEFAULT_OUTPUT_ORIGINAL_NAME) .setSystemName(DEFAULT_OUTPUT_SYSTEM_NAME) .setName("output") .setCodec( CloudObjects.asCloudObject( windowedGroupedCoder, /* sdkComponents= */ null)))); List<ParallelInstruction> instructions = Arrays.asList( makeWindowingSourceInstruction(kvCoder), mergeWindowsInstruction, makeSinkInstruction(groupedCoder, 1)); FakeWindmillServer server = new FakeWindmillServer(errorCollector); StreamingDataflowWorker worker = makeWorker(instructions, createTestingPipelineOptions(server), false /* publishCounters */); Map<String, String> nameMap = new HashMap<>(); nameMap.put("MergeWindowsStep", "MergeWindows"); worker.addStateNameMappings(nameMap); worker.start(); server.whenGetDataCalled().answerByDefault(EMPTY_DATA_RESPONDER); for (int i = 0; i < actions.size(); ++i) { Action action = actions.get(i); server.whenGetWorkCalled().thenReturn(action.response); Map<Long, Windmill.WorkItemCommitRequest> result = server.waitForAndGetCommits(1); WorkItemCommitRequest actualOutput = result.get(i + 1L); assertThat(actualOutput, Matchers.not(Matchers.nullValue())); verifyTimers(actualOutput, action.expectedTimers); verifyHolds(actualOutput, action.expectedHolds); } } @Test public void testMergeSessionWindows() throws Exception { runMergeSessionsActions( Collections.singletonList( new Action( buildSessionInput( 1, 40, 0, Collections.singletonList(1L), Collections.EMPTY_LIST)) .withHolds( buildHold("/gAAAAAAAAAsK/+uhold", -1, true), buildHold("/gAAAAAAAAAsK/+uextra", -1, true)) .withTimers(buildWatermarkTimer("/s/gAAAAAAAAAsK/+0", 3600010)))); runMergeSessionsActions( Arrays.asList( new Action( buildSessionInput( 1, 0, 0, Collections.singletonList(1L), Collections.EMPTY_LIST)) .withHolds(buildHold("/gAAAAAAAAAsK/+uhold", 10, false)) .withTimers( buildWatermarkTimer("/s/gAAAAAAAAAsK/+0", 10), buildWatermarkTimer("/s/gAAAAAAAAAsK/+0", 3600010)), new Action( buildSessionInput( 2, 30, 0, Collections.EMPTY_LIST, Collections.singletonList(buildWatermarkTimer("/s/gAAAAAAAAAsK/+0", 10)))) 
.withTimers(buildWatermarkTimer("/s/gAAAAAAAAAsK/+0", 3600010)) .withHolds( buildHold("/gAAAAAAAAAsK/+uhold", -1, true), buildHold("/gAAAAAAAAAsK/+uextra", -1, true)), new Action( buildSessionInput( 3, 30, 0, Collections.singletonList(8L), Collections.EMPTY_LIST)) .withTimers( buildWatermarkTimer("/s/gAAAAAAAABIR/+0", 3600017), buildWatermarkTimer("/s/gAAAAAAAAAsK/+0", 10, true), buildWatermarkTimer("/s/gAAAAAAAAAsK/+0", 3600010, true)) .withHolds( buildHold("/gAAAAAAAAAsK/+uhold", -1, true), buildHold("/gAAAAAAAAAsK/+uextra", -1, true)), new Action( buildSessionInput( 4, 30, 0, Collections.singletonList(31L), Collections.EMPTY_LIST)) .withTimers( buildWatermarkTimer("/s/gAAAAAAAACkK/+0", 3600040), buildWatermarkTimer("/s/gAAAAAAAACkK/+0", 40)) .withHolds(buildHold("/gAAAAAAAACkK/+uhold", 40, false)), new Action(buildSessionInput(5, 30, 0, Arrays.asList(17L, 23L), Collections.EMPTY_LIST)) .withTimers( buildWatermarkTimer("/s/gAAAAAAAACkK/+0", 3600040, true), buildWatermarkTimer("/s/gAAAAAAAACkK/+0", 40, true), buildWatermarkTimer("/s/gAAAAAAAABIR/+0", 3600017, true), buildWatermarkTimer("/s/gAAAAAAAABIR/+0", 17, true), buildWatermarkTimer("/s/gAAAAAAAACko/+0", 40), buildWatermarkTimer("/s/gAAAAAAAACko/+0", 3600040)) .withHolds( buildHold("/gAAAAAAAACkK/+uhold", -1, true), buildHold("/gAAAAAAAACkK/+uextra", -1, true), buildHold("/gAAAAAAAAAsK/+uhold", 40, true), buildHold("/gAAAAAAAAAsK/+uextra", 3600040, true)), new Action( buildSessionInput( 6, 50, 0, Collections.EMPTY_LIST, Collections.singletonList(buildWatermarkTimer("/s/gAAAAAAAACko/+0", 40)))) .withTimers(buildWatermarkTimer("/s/gAAAAAAAACko/+0", 3600040)) .withHolds( buildHold("/gAAAAAAAAAsK/+uhold", -1, true), buildHold("/gAAAAAAAAAsK/+uextra", -1, true)))); } private List<ParallelInstruction> makeUnboundedSourcePipeline() throws Exception { return makeUnboundedSourcePipeline(1, new PrintFn()); } private List<ParallelInstruction> makeUnboundedSourcePipeline( int numMessagesPerShard, DoFn<ValueWithRecordId<KV<Integer, Integer>>, String> doFn) throws Exception { DataflowPipelineOptions options = PipelineOptionsFactory.create().as(DataflowPipelineOptions.class); options.setNumWorkers(1); CloudObject codec = CloudObjects.asCloudObject( WindowedValue.getFullCoder( ValueWithRecordId.ValueWithRecordIdCoder.of( KvCoder.of(VarIntCoder.of(), VarIntCoder.of())), GlobalWindow.Coder.INSTANCE), /* sdkComponents= */ null); return Arrays.asList( new ParallelInstruction() .setSystemName("Read") .setOriginalName("OriginalReadName") .setRead( new ReadInstruction() .setSource( CustomSources.serializeToCloudSource( new TestCountingSource(numMessagesPerShard), options) .setCodec(codec))) .setOutputs( Collections.singletonList( new InstructionOutput() .setName("read_output") .setOriginalName(DEFAULT_OUTPUT_ORIGINAL_NAME) .setSystemName(DEFAULT_OUTPUT_SYSTEM_NAME) .setCodec(codec))), makeDoFnInstruction(doFn, 0, StringUtf8Coder.of(), WindowingStrategy.globalDefault()), makeSinkInstruction(StringUtf8Coder.of(), 1, GlobalWindow.Coder.INSTANCE)); } @Test public void testUnboundedSources() throws Exception { List<Integer> finalizeTracker = Lists.newArrayList(); TestCountingSource.setFinalizeTracker(finalizeTracker); FakeWindmillServer server = new FakeWindmillServer(errorCollector); StreamingDataflowWorker worker = makeWorker( makeUnboundedSourcePipeline(), createTestingPipelineOptions(server), false /* publishCounters */); worker.start(); server .whenGetWorkCalled() .thenReturn( buildInput( "work {" + " computation_id: \"computation\"" + " 
input_data_watermark: 0" + " work {" + " key: \"0000000000000001\"" + " sharding_key: 1" + " work_token: 1" + " cache_token: 1" + " }" + "}", null)); Map<Long, Windmill.WorkItemCommitRequest> result = server.waitForAndGetCommits(1); Iterable<CounterUpdate> counters = worker.buildCounters(); Windmill.WorkItemCommitRequest commit = result.get(1L); UnsignedLong finalizeId = UnsignedLong.fromLongBits(commit.getSourceStateUpdates().getFinalizeIds(0)); assertThat( removeDynamicFields(commit), equalTo( setMessagesMetadata( PaneInfo.NO_FIRING, CoderUtils.encodeToByteArray( CollectionCoder.of(GlobalWindow.Coder.INSTANCE), Collections.singletonList(GlobalWindow.INSTANCE)), parseCommitRequest( "key: \"0000000000000001\" " + "sharding_key: 1 " + "work_token: 1 " + "cache_token: 1 " + "source_backlog_bytes: 7 " + "source_bytes_processed: 18 " + "output_messages {" + " destination_stream_id: \"out\"" + " bundles {" + " key: \"0000000000000001\"" + " messages {" + " timestamp: 0" + " data: \"0:0\"" + " }" + " messages_ids: \"\"" + " }" + "} " + "source_state_updates {" + " state: \"\000\"" + " finalize_ids: " + finalizeId + "} " + "source_watermark: 1000")) .build())); server .whenGetWorkCalled() .thenReturn( buildInput( "work {" + " computation_id: \"computation\"" + " input_data_watermark: 0" + " work {" + " key: \"0000000000000001\"" + " sharding_key: 1" + " work_token: 2" + " cache_token: 1" + " source_state {" + " state: \"\001\"" + " finalize_ids: " + finalizeId + " } " + " }" + "}", null)); result = server.waitForAndGetCommits(1); counters = worker.buildCounters(); commit = result.get(2L); finalizeId = UnsignedLong.fromLongBits(commit.getSourceStateUpdates().getFinalizeIds(0)); assertThat( removeDynamicFields(commit), equalTo( parseCommitRequest( "key: \"0000000000000001\" " + "sharding_key: 1 " + "work_token: 2 " + "cache_token: 1 " + "source_backlog_bytes: 7 " + "source_bytes_processed: 0 " + "source_state_updates {" + " state: \"\000\"" + " finalize_ids: " + finalizeId + "} " + "source_watermark: 1000") .build())); assertThat(finalizeTracker, contains(0)); assertNull(getCounter(counters, "dataflow_input_size-computation")); server .whenGetWorkCalled() .thenReturn( buildInput( "work {" + " computation_id: \"computation\"" + " input_data_watermark: 0" + " work {" + " key: \"0000000000000002\"" + " sharding_key: 2" + " work_token: 3" + " cache_token: 2" + " source_state {" + " state: \"\000\"" + " } " + " }" + "}", null)); result = server.waitForAndGetCommits(1); counters = worker.buildCounters(); commit = result.get(3L); finalizeId = UnsignedLong.fromLongBits(commit.getSourceStateUpdates().getFinalizeIds(0)); assertThat( removeDynamicFields(commit), equalTo( parseCommitRequest( "key: \"0000000000000002\" " + "sharding_key: 2 " + "work_token: 3 " + "cache_token: 2 " + "source_backlog_bytes: 7 " + "source_bytes_processed: 0 " + "source_state_updates {" + " state: \"\000\"" + " finalize_ids: " + finalizeId + "} " + "source_watermark: 1000") .build())); assertNull(getCounter(counters, "dataflow_input_size-computation")); } @Test public void testUnboundedSourcesDrain() throws Exception { List<Integer> finalizeTracker = Lists.newArrayList(); TestCountingSource.setFinalizeTracker(finalizeTracker); FakeWindmillServer server = new FakeWindmillServer(errorCollector); StreamingDataflowWorker worker = makeWorker( makeUnboundedSourcePipeline(), createTestingPipelineOptions(server), true /* publishCounters */); worker.start(); server .whenGetWorkCalled() .thenReturn( buildInput( "work {" + " computation_id: 
\"computation\"" + " input_data_watermark: 0" + " work {" + " key: \"0000000000000001\"" + " sharding_key: 1" + " work_token: 2" + " cache_token: 3" + " }" + "}", null)); Map<Long, Windmill.WorkItemCommitRequest> result = server.waitForAndGetCommits(1); Windmill.WorkItemCommitRequest commit = result.get(2L); UnsignedLong finalizeId = UnsignedLong.fromLongBits(commit.getSourceStateUpdates().getFinalizeIds(0)); assertThat( removeDynamicFields(commit), equalTo( setMessagesMetadata( PaneInfo.NO_FIRING, CoderUtils.encodeToByteArray( CollectionCoder.of(GlobalWindow.Coder.INSTANCE), Collections.singletonList(GlobalWindow.INSTANCE)), parseCommitRequest( "key: \"0000000000000001\" " + "sharding_key: 1 " + "work_token: 2 " + "cache_token: 3 " + "source_backlog_bytes: 7 " + "source_bytes_processed: 18 " + "output_messages {" + " destination_stream_id: \"out\"" + " bundles {" + " key: \"0000000000000001\"" + " messages {" + " timestamp: 0" + " data: \"0:0\"" + " }" + " messages_ids: \"\"" + " }" + "} " + "source_state_updates {" + " state: \"\000\"" + " finalize_ids: " + finalizeId + "} " + "source_watermark: 1000")) .build())); server .whenGetWorkCalled() .thenReturn( buildInput( "work {" + " computation_id: \"computation\"" + " input_data_watermark: 0" + " work {" + " key: \"0000000000000001\"" + " sharding_key: 1" + " work_token: 3" + " cache_token: 3" + " source_state {" + " only_finalize: true" + " finalize_ids: " + finalizeId + " }" + " }" + "}", null)); result = server.waitForAndGetCommits(1); commit = result.get(3L); assertThat( commit, equalTo( parseCommitRequest( "key: \"0000000000000001\" " + "sharding_key: 1 " + "work_token: 3 " + "cache_token: 3 " + "source_state_updates {" + " only_finalize: true" + "} ") .build())); assertThat(finalizeTracker, contains(0)); } @Test public void testUnboundedSourceWorkRetry() throws Exception { List<Integer> finalizeTracker = Lists.newArrayList(); TestCountingSource.setFinalizeTracker(finalizeTracker); FakeWindmillServer server = new FakeWindmillServer(errorCollector); StreamingDataflowWorkerOptions options = createTestingPipelineOptions(server); options.setWorkerCacheMb(0); StreamingDataflowWorker worker = makeWorker(makeUnboundedSourcePipeline(), options, false /* publishCounters */); worker.start(); Windmill.GetWorkResponse work = buildInput( "work {" + " computation_id: \"computation\"" + " input_data_watermark: 0" + " work {" + " key: \"0000000000000001\"" + " sharding_key: 1" + " work_token: 1" + " cache_token: 1" + " }" + "}", null); server.whenGetWorkCalled().thenReturn(work); Map<Long, Windmill.WorkItemCommitRequest> result = server.waitForAndGetCommits(1); Iterable<CounterUpdate> counters = worker.buildCounters(); Windmill.WorkItemCommitRequest commit = result.get(1L); UnsignedLong finalizeId = UnsignedLong.fromLongBits(commit.getSourceStateUpdates().getFinalizeIds(0)); Windmill.WorkItemCommitRequest expectedCommit = setMessagesMetadata( PaneInfo.NO_FIRING, CoderUtils.encodeToByteArray( CollectionCoder.of(GlobalWindow.Coder.INSTANCE), Collections.singletonList(GlobalWindow.INSTANCE)), parseCommitRequest( "key: \"0000000000000001\" " + "sharding_key: 1 " + "work_token: 1 " + "cache_token: 1 " + "source_backlog_bytes: 7 " + "source_bytes_processed: 18 " + "output_messages {" + " destination_stream_id: \"out\"" + " bundles {" + " key: \"0000000000000001\"" + " messages {" + " timestamp: 0" + " data: \"0:0\"" + " }" + " messages_ids: \"\"" + " }" + "} " + "source_state_updates {" + " state: \"\000\"" + " finalize_ids: " + finalizeId + "} " + 
"source_watermark: 1000")) .build(); assertThat(removeDynamicFields(commit), equalTo(expectedCommit)); server.clearCommitsReceived(); server.whenGetWorkCalled().thenReturn(work); result = server.waitForAndGetCommits(1); commit = result.get(1L); finalizeId = UnsignedLong.fromLongBits(commit.getSourceStateUpdates().getFinalizeIds(0)); Windmill.WorkItemCommitRequest.Builder commitBuilder = expectedCommit.toBuilder(); commitBuilder .getSourceStateUpdatesBuilder() .setFinalizeIds(0, commit.getSourceStateUpdates().getFinalizeIds(0)); expectedCommit = commitBuilder.build(); assertThat(removeDynamicFields(commit), equalTo(expectedCommit)); server .whenGetWorkCalled() .thenReturn( buildInput( "work {" + " computation_id: \"computation\"" + " input_data_watermark: 0" + " work {" + " key: \"0000000000000001\"" + " sharding_key: 1" + " work_token: 2" + " cache_token: 1" + " source_state {" + " state: \"\001\"" + " finalize_ids: " + finalizeId + " } " + " }" + "}", null)); result = server.waitForAndGetCommits(1); commit = result.get(2L); finalizeId = UnsignedLong.fromLongBits(commit.getSourceStateUpdates().getFinalizeIds(0)); assertThat( removeDynamicFields(commit), equalTo( parseCommitRequest( "key: \"0000000000000001\" " + "sharding_key: 1 " + "work_token: 2 " + "cache_token: 1 " + "source_backlog_bytes: 7 " + "source_bytes_processed: 0 " + "source_state_updates {" + " state: \"\000\"" + " finalize_ids: " + finalizeId + "} " + "source_watermark: 1000") .build())); assertThat(finalizeTracker, contains(0)); } @Test public void testActiveWork() throws Exception { BoundedQueueExecutor mockExecutor = Mockito.mock(BoundedQueueExecutor.class); ComputationState computationState = new ComputationState( "computation", defaultMapTask(Collections.singletonList(makeSourceInstruction(StringUtf8Coder.of()))), mockExecutor, ImmutableMap.of(), null); ShardedKey key1 = ShardedKey.create(ByteString.copyFromUtf8("key1"), 1); ShardedKey key2 = ShardedKey.create(ByteString.copyFromUtf8("key2"), 2); Work m1 = createMockWork(1); assertTrue(computationState.activateWork(key1, m1)); Mockito.verify(mockExecutor).execute(m1, m1.getWorkItem().getSerializedSize()); computationState.completeWorkAndScheduleNextWorkForKey(key1, 1); Mockito.verifyNoMoreInteractions(mockExecutor); Work m2 = createMockWork(2); assertTrue(computationState.activateWork(key1, m2)); Mockito.verify(mockExecutor).execute(m2, m2.getWorkItem().getSerializedSize()); Work m3 = createMockWork(3); assertTrue(computationState.activateWork(key1, m3)); Mockito.verifyNoMoreInteractions(mockExecutor); Work m4 = createMockWork(4); assertTrue(computationState.activateWork(key2, m4)); Mockito.verify(mockExecutor).execute(m4, m4.getWorkItem().getSerializedSize()); computationState.completeWorkAndScheduleNextWorkForKey(key2, 4); Mockito.verifyNoMoreInteractions(mockExecutor); computationState.completeWorkAndScheduleNextWorkForKey(key1, 2); Mockito.verify(mockExecutor).forceExecute(m3, m3.getWorkItem().getSerializedSize()); computationState.completeWorkAndScheduleNextWorkForKey(key1, 3); Mockito.verifyNoMoreInteractions(mockExecutor); Work m5 = createMockWork(5); computationState.activateWork(key1, m5); Mockito.verify(mockExecutor).execute(m5, m5.getWorkItem().getSerializedSize()); assertFalse(computationState.activateWork(key1, m5)); Mockito.verifyNoMoreInteractions(mockExecutor); computationState.completeWorkAndScheduleNextWorkForKey(key1, 5); Mockito.verifyNoMoreInteractions(mockExecutor); } @Test public void testActiveWorkForShardedKeys() throws Exception { 
BoundedQueueExecutor mockExecutor = Mockito.mock(BoundedQueueExecutor.class); ComputationState computationState = new ComputationState( "computation", defaultMapTask(Collections.singletonList(makeSourceInstruction(StringUtf8Coder.of()))), mockExecutor, ImmutableMap.of(), null); ShardedKey key1Shard1 = ShardedKey.create(ByteString.copyFromUtf8("key1"), 1); ShardedKey key1Shard2 = ShardedKey.create(ByteString.copyFromUtf8("key1"), 2); Work m1 = createMockWork(1); assertTrue(computationState.activateWork(key1Shard1, m1)); Mockito.verify(mockExecutor).execute(m1, m1.getWorkItem().getSerializedSize()); computationState.completeWorkAndScheduleNextWorkForKey(key1Shard1, 1); Mockito.verifyNoMoreInteractions(mockExecutor); Work m2 = createMockWork(2); assertTrue(computationState.activateWork(key1Shard1, m2)); Mockito.verify(mockExecutor).execute(m2, m2.getWorkItem().getSerializedSize()); Work m3 = createMockWork(3); assertTrue(computationState.activateWork(key1Shard1, m3)); Mockito.verifyNoMoreInteractions(mockExecutor); Work m4 = createMockWork(3); assertFalse(computationState.activateWork(key1Shard1, m4)); Mockito.verifyNoMoreInteractions(mockExecutor); assertTrue(computationState.activateWork(key1Shard2, m4)); Mockito.verify(mockExecutor).execute(m4, m4.getWorkItem().getSerializedSize()); assertFalse(computationState.activateWork(key1Shard2, m4)); computationState.completeWorkAndScheduleNextWorkForKey(key1Shard2, 3); Mockito.verifyNoMoreInteractions(mockExecutor); } @Test @Ignore public void testMaxThreadMetric() throws Exception { int maxThreads = 2; int threadExpiration = 60; BoundedQueueExecutor executor = new BoundedQueueExecutor( maxThreads, threadExpiration, TimeUnit.SECONDS, maxThreads, 10000000, new ThreadFactoryBuilder() .setNameFormat("DataflowWorkUnits-%d") .setDaemon(true) .build()); ComputationState computationState = new ComputationState( "computation", defaultMapTask(Collections.singletonList(makeSourceInstruction(StringUtf8Coder.of()))), executor, ImmutableMap.of(), null); ShardedKey key1Shard1 = ShardedKey.create(ByteString.copyFromUtf8("key1"), 1); Consumer<Work> sleepProcessWorkFn = unused -> { try { Thread.sleep(1000); } catch (InterruptedException e) { Thread.currentThread().interrupt(); } }; Work m2 = createMockWork(2, sleepProcessWorkFn); Work m3 = createMockWork(3, sleepProcessWorkFn); assertTrue(computationState.activateWork(key1Shard1, m2)); assertTrue(computationState.activateWork(key1Shard1, m3)); executor.execute(m2, m2.getWorkItem().getSerializedSize()); executor.execute(m3, m3.getWorkItem().getSerializedSize()); long i = 990L; assertTrue(executor.allThreadsActiveTime() >= i); executor.shutdown(); } volatile boolean stop = false; @Test public void testExceptionInvalidatesCache() throws Exception { FakeWindmillServer server = new FakeWindmillServer(errorCollector); server.setExpectedExceptionCount(2); DataflowPipelineOptions options = createTestingPipelineOptions(server); options.setNumWorkers(1); DataflowPipelineDebugOptions debugOptions = options.as(DataflowPipelineDebugOptions.class); debugOptions.setUnboundedReaderMaxElements(1); CloudObject codec = CloudObjects.asCloudObject( WindowedValue.getFullCoder( ValueWithRecordId.ValueWithRecordIdCoder.of( KvCoder.of(VarIntCoder.of(), VarIntCoder.of())), GlobalWindow.Coder.INSTANCE), /* sdkComponents= */ null); TestCountingSource counter = new TestCountingSource(3).withThrowOnFirstSnapshot(true); List<ParallelInstruction> instructions = Arrays.asList( new ParallelInstruction() .setOriginalName("OriginalReadName")
.setSystemName("Read") .setName(DEFAULT_PARDO_USER_NAME) .setRead( new ReadInstruction() .setSource( CustomSources.serializeToCloudSource(counter, options).setCodec(codec))) .setOutputs( Collections.singletonList( new InstructionOutput() .setName("read_output") .setOriginalName(DEFAULT_OUTPUT_ORIGINAL_NAME) .setSystemName(DEFAULT_OUTPUT_SYSTEM_NAME) .setCodec(codec))), makeDoFnInstruction( new TestExceptionInvalidatesCacheFn(), 0, StringUtf8Coder.of(), WindowingStrategy.globalDefault()), makeSinkInstruction(StringUtf8Coder.of(), 1, GlobalWindow.Coder.INSTANCE)); StreamingDataflowWorker worker = makeWorker( instructions, options.as(StreamingDataflowWorkerOptions.class), true /* publishCounters */); worker.setRetryLocallyDelayMs(100); worker.start(); for (int i = 0; i < 3; i++) { ByteString state; if (i == 0 || i == 1) { state = ByteString.EMPTY; } else { state = ByteString.copyFrom(new byte[] {42}); } Windmill.GetDataResponse.Builder dataResponse = Windmill.GetDataResponse.newBuilder(); dataResponse .addDataBuilder() .setComputationId(DEFAULT_COMPUTATION_ID) .addDataBuilder() .setKey(ByteString.copyFromUtf8("0000000000000001")) .setShardingKey(1) .addValuesBuilder() .setTag(ByteString.copyFromUtf8(" .setStateFamily(DEFAULT_PARDO_STATE_FAMILY) .getValueBuilder() .setTimestamp(0) .setData(state); server.whenGetDataCalled().thenReturn(dataResponse.build()); } for (int i = 0; i < 3; i++) { StringBuilder sb = new StringBuilder(); sb.append("work {\n"); sb.append(" computation_id: \"computation\"\n"); sb.append(" input_data_watermark: 0\n"); sb.append(" work {\n"); sb.append(" key: \"0000000000000001\"\n"); sb.append(" sharding_key: 1\n"); sb.append(" work_token: "); sb.append(i); sb.append(" cache_token: 1"); sb.append("\n"); if (i > 0) { int previousCheckpoint = i - 1; sb.append(" source_state {\n"); sb.append(" state: \""); sb.append((char) previousCheckpoint); sb.append("\"\n"); sb.append(" }\n"); } sb.append(" }\n"); sb.append("}\n"); server.whenGetWorkCalled().thenReturn(buildInput(sb.toString(), null)); Map<Long, Windmill.WorkItemCommitRequest> result = server.waitForAndGetCommits(1); Windmill.WorkItemCommitRequest commit = result.get((long) i); UnsignedLong finalizeId = UnsignedLong.fromLongBits(commit.getSourceStateUpdates().getFinalizeIds(0)); sb = new StringBuilder(); sb.append("key: \"0000000000000001\"\n"); sb.append("sharding_key: 1\n"); sb.append("work_token: "); sb.append(i); sb.append("\n"); sb.append("cache_token: 1\n"); sb.append("output_messages {\n"); sb.append(" destination_stream_id: \"out\"\n"); sb.append(" bundles {\n"); sb.append(" key: \"0000000000000001\"\n"); int messageNum = i; sb.append(" messages {\n"); sb.append(" timestamp: "); sb.append(messageNum * 1000); sb.append("\n"); sb.append(" data: \"0:"); sb.append(messageNum); sb.append("\"\n"); sb.append(" }\n"); sb.append(" messages_ids: \"\"\n"); sb.append(" }\n"); sb.append("}\n"); if (i == 0) { sb.append("value_updates {\n"); sb.append(" tag: \" sb.append(" value {\n"); sb.append(" timestamp: 0\n"); sb.append(" data: \""); sb.append((char) 42); sb.append("\"\n"); sb.append(" }\n"); sb.append(" state_family: \"parDoStateFamily\"\n"); sb.append("}\n"); } int sourceState = i; sb.append("source_state_updates {\n"); sb.append(" state: \""); sb.append((char) sourceState); sb.append("\"\n"); sb.append(" finalize_ids: "); sb.append(finalizeId); sb.append("}\n"); sb.append("source_watermark: "); sb.append((sourceState + 1) * 1000); sb.append("\n"); sb.append("source_backlog_bytes: 7\n"); assertThat( setValuesTimestamps( 
removeDynamicFields(commit) .toBuilder() .clearOutputTimers() .clearSourceBytesProcessed()) .build(), equalTo( setMessagesMetadata( PaneInfo.NO_FIRING, CoderUtils.encodeToByteArray( CollectionCoder.of(GlobalWindow.Coder.INSTANCE), ImmutableList.of(GlobalWindow.INSTANCE)), parseCommitRequest(sb.toString())) .build())); } } @Test public void testHugeCommits() throws Exception { List<ParallelInstruction> instructions = Arrays.asList( makeSourceInstruction(StringUtf8Coder.of()), makeDoFnInstruction(new FanoutFn(), 0, StringUtf8Coder.of()), makeSinkInstruction(StringUtf8Coder.of(), 0)); FakeWindmillServer server = new FakeWindmillServer(errorCollector); StreamingDataflowWorkerOptions options = createTestingPipelineOptions(server); StreamingDataflowWorker worker = makeWorker(instructions, options, true /* publishCounters */); worker.start(); server.whenGetWorkCalled().thenReturn(makeInput(0, TimeUnit.MILLISECONDS.toMicros(0))); server.waitForAndGetCommits(0); worker.stop(); } @Test public void testActiveWorkRefresh() throws Exception { List<ParallelInstruction> instructions = Arrays.asList( makeSourceInstruction(StringUtf8Coder.of()), makeDoFnInstruction(new SlowDoFn(), 0, StringUtf8Coder.of()), makeSinkInstruction(StringUtf8Coder.of(), 0)); FakeWindmillServer server = new FakeWindmillServer(errorCollector); StreamingDataflowWorkerOptions options = createTestingPipelineOptions(server); options.setActiveWorkRefreshPeriodMillis(100); StreamingDataflowWorker worker = makeWorker(instructions, options, true /* publishCounters */); worker.start(); server.whenGetWorkCalled().thenReturn(makeInput(0, TimeUnit.MILLISECONDS.toMicros(0))); server.waitForAndGetCommits(1); worker.stop(); assertThat(server.numGetDataRequests(), greaterThan(0)); } @Test public void testLatencyAttributionProtobufsPopulated() { FakeClock clock = new FakeClock(); Work work = Work.create(null, clock, Collections.emptyList(), unused -> {}); clock.sleep(Duration.millis(10)); work.setState(Work.State.PROCESSING); clock.sleep(Duration.millis(20)); work.setState(Work.State.READING); clock.sleep(Duration.millis(30)); work.setState(Work.State.PROCESSING); clock.sleep(Duration.millis(40)); work.setState(Work.State.COMMIT_QUEUED); clock.sleep(Duration.millis(50)); work.setState(Work.State.COMMITTING); clock.sleep(Duration.millis(60)); Iterator<LatencyAttribution> it = work.getLatencyAttributions().iterator(); assertTrue(it.hasNext()); LatencyAttribution lat = it.next(); assertSame(State.QUEUED, lat.getState()); assertEquals(10, lat.getTotalDurationMillis()); assertTrue(it.hasNext()); lat = it.next(); assertSame(State.ACTIVE, lat.getState()); assertEquals(60, lat.getTotalDurationMillis()); assertTrue(it.hasNext()); lat = it.next(); assertSame(State.READING, lat.getState()); assertEquals(30, lat.getTotalDurationMillis()); assertTrue(it.hasNext()); lat = it.next(); assertSame(State.COMMITTING, lat.getState()); assertEquals(110, lat.getTotalDurationMillis()); assertFalse(it.hasNext()); } @Test public void testLatencyAttributionToQueuedState() throws Exception { final int workToken = 3232; FakeClock clock = new FakeClock(); List<ParallelInstruction> instructions = Arrays.asList( makeSourceInstruction(StringUtf8Coder.of()), makeDoFnInstruction( new FakeSlowDoFn(clock, Duration.millis(1000)), 0, StringUtf8Coder.of()), makeSinkInstruction(StringUtf8Coder.of(), 0)); FakeWindmillServer server = new FakeWindmillServer(errorCollector); StreamingDataflowWorkerOptions options = createTestingPipelineOptions(server); 
options.setActiveWorkRefreshPeriodMillis(100); options.setNumberOfWorkerHarnessThreads(1); StreamingDataflowWorker worker = makeWorker( instructions, options, false /* publishCounters */, clock, clock::newFakeScheduledExecutor); worker.start(); ActiveWorkRefreshSink awrSink = new ActiveWorkRefreshSink(EMPTY_DATA_RESPONDER); server.whenGetDataCalled().answerByDefault(awrSink::getData).delayEachResponseBy(Duration.ZERO); server .whenGetWorkCalled() .thenReturn(makeInput(workToken + 1, 0 /* timestamp */)) .thenReturn(makeInput(workToken, 1 /* timestamp */)); server.waitForAndGetCommits(2); worker.stop(); assertEquals( awrSink.getLatencyAttributionDuration(workToken, State.QUEUED), Duration.millis(1000)); assertEquals(awrSink.getLatencyAttributionDuration(workToken + 1, State.QUEUED), Duration.ZERO); } @Test public void testLatencyAttributionToActiveState() throws Exception { final int workToken = 4242; FakeClock clock = new FakeClock(); List<ParallelInstruction> instructions = Arrays.asList( makeSourceInstruction(StringUtf8Coder.of()), makeDoFnInstruction( new FakeSlowDoFn(clock, Duration.millis(1000)), 0, StringUtf8Coder.of()), makeSinkInstruction(StringUtf8Coder.of(), 0)); FakeWindmillServer server = new FakeWindmillServer(errorCollector); StreamingDataflowWorkerOptions options = createTestingPipelineOptions(server); options.setActiveWorkRefreshPeriodMillis(100); StreamingDataflowWorker worker = makeWorker( instructions, options, false /* publishCounters */, clock, clock::newFakeScheduledExecutor); worker.start(); ActiveWorkRefreshSink awrSink = new ActiveWorkRefreshSink(EMPTY_DATA_RESPONDER); server.whenGetDataCalled().answerByDefault(awrSink::getData).delayEachResponseBy(Duration.ZERO); server.whenGetWorkCalled().thenReturn(makeInput(workToken, 0 /* timestamp */)); server.waitForAndGetCommits(1); worker.stop(); assertEquals( awrSink.getLatencyAttributionDuration(workToken, State.ACTIVE), Duration.millis(1000)); } @Test public void testLatencyAttributionToReadingState() throws Exception { final int workToken = 5454; FakeClock clock = new FakeClock(); List<ParallelInstruction> instructions = Arrays.asList( makeSourceInstruction(StringUtf8Coder.of()), makeDoFnInstruction(new ReadingDoFn(), 0, StringUtf8Coder.of()), makeSinkInstruction(StringUtf8Coder.of(), 0)); FakeWindmillServer server = new FakeWindmillServer(errorCollector); StreamingDataflowWorkerOptions options = createTestingPipelineOptions(server); options.setActiveWorkRefreshPeriodMillis(100); StreamingDataflowWorker worker = makeWorker( instructions, options, false /* publishCounters */, clock, clock::newFakeScheduledExecutor); worker.start(); ActiveWorkRefreshSink awrSink = new ActiveWorkRefreshSink( (request) -> { clock.sleep(Duration.millis(1000)); return EMPTY_DATA_RESPONDER.apply(request); }); server.whenGetDataCalled().answerByDefault(awrSink::getData).delayEachResponseBy(Duration.ZERO); server.whenGetWorkCalled().thenReturn(makeInput(workToken, 0 /* timestamp */)); server.waitForAndGetCommits(1); worker.stop(); assertEquals( awrSink.getLatencyAttributionDuration(workToken, State.READING), Duration.millis(1000)); } @Test public void testLatencyAttributionToCommittingState() throws Exception { final int workToken = 6464; FakeClock clock = new FakeClock(); List<ParallelInstruction> instructions = Arrays.asList( makeSourceInstruction(StringUtf8Coder.of()), makeSinkInstruction(StringUtf8Coder.of(), 0)); FakeWindmillServer server = new FakeWindmillServer(errorCollector); server .whenCommitWorkCalled() .answerByDefault( (request) -> 
{ clock.sleep(Duration.millis(1000)); return Windmill.CommitWorkResponse.getDefaultInstance(); }); StreamingDataflowWorkerOptions options = createTestingPipelineOptions(server); options.setActiveWorkRefreshPeriodMillis(100); StreamingDataflowWorker worker = makeWorker( instructions, options, false /* publishCounters */, clock, clock::newFakeScheduledExecutor); worker.start(); ActiveWorkRefreshSink awrSink = new ActiveWorkRefreshSink(EMPTY_DATA_RESPONDER); server.whenGetDataCalled().answerByDefault(awrSink::getData).delayEachResponseBy(Duration.ZERO); server.whenGetWorkCalled().thenReturn(makeInput(workToken, TimeUnit.MILLISECONDS.toMicros(0))); server.waitForAndGetCommits(1); worker.stop(); assertEquals( awrSink.getLatencyAttributionDuration(workToken, State.COMMITTING), Duration.millis(1000)); } @Test public void testLatencyAttributionPopulatedInCommitRequest() throws Exception { final int workToken = 7272; long dofnWaitTimeMs = 1000; FakeClock clock = new FakeClock(); List<ParallelInstruction> instructions = Arrays.asList( makeSourceInstruction(StringUtf8Coder.of()), makeDoFnInstruction( new FakeSlowDoFn(clock, Duration.millis(dofnWaitTimeMs)), 0, StringUtf8Coder.of()), makeSinkInstruction(StringUtf8Coder.of(), 0)); FakeWindmillServer server = new FakeWindmillServer(errorCollector); StreamingDataflowWorkerOptions options = createTestingPipelineOptions(server); options.setActiveWorkRefreshPeriodMillis(100); options.setNumberOfWorkerHarnessThreads(1); StreamingDataflowWorker worker = makeWorker( instructions, options, false /* publishCounters */, clock, clock::newFakeScheduledExecutor); worker.start(); ActiveWorkRefreshSink awrSink = new ActiveWorkRefreshSink(EMPTY_DATA_RESPONDER); server.whenGetDataCalled().answerByDefault(awrSink::getData).delayEachResponseBy(Duration.ZERO); server.whenGetWorkCalled().thenReturn(makeInput(workToken, 1 /* timestamp */)); Map<Long, WorkItemCommitRequest> workItemCommitRequest = server.waitForAndGetCommits(1); worker.stop(); assertEquals( workItemCommitRequest.get((long) workToken).getPerWorkItemLatencyAttributions(0), LatencyAttribution.newBuilder() .setState(State.ACTIVE) .setTotalDurationMillis(dofnWaitTimeMs) .build()); if (streamingEngine) { assertEquals( workItemCommitRequest.get((long) workToken).getPerWorkItemLatencyAttributions(1), LatencyAttribution.newBuilder() .setState(State.GET_WORK_IN_TRANSIT_TO_USER_WORKER) .setTotalDurationMillis(1000) .build()); } } @Test public void testLimitOnOutputBundleSize() throws Exception { List<Integer> finalizeTracker = Lists.newArrayList(); TestCountingSource.setFinalizeTracker(finalizeTracker); final int numMessagesInCustomSourceShard = 100000; final int inflatedSizePerMessage = 10000; FakeWindmillServer server = new FakeWindmillServer(errorCollector); StreamingDataflowWorker worker = makeWorker( makeUnboundedSourcePipeline( numMessagesInCustomSourceShard, new InflateDoFn(inflatedSizePerMessage)), createTestingPipelineOptions(server), false /* publishCounters */); worker.start(); server .whenGetWorkCalled() .thenReturn( buildInput( "work {" + " computation_id: \"computation\"" + " input_data_watermark: 0" + " work {" + " key: \"0000000000000001\"" + " sharding_key: 1" + " work_token: 1" + " cache_token: 1" + " }" + "}", null)); Matcher<Integer> isWithinBundleSizeLimits = both(greaterThan(StreamingDataflowWorker.MAX_SINK_BYTES * 9 / 10)) .and(lessThan(StreamingDataflowWorker.MAX_SINK_BYTES * 11 / 10)); Map<Long, Windmill.WorkItemCommitRequest> result = server.waitForAndGetCommits(1); 
Windmill.WorkItemCommitRequest commit = result.get(1L); assertThat(commit.getSerializedSize(), isWithinBundleSizeLimits); server .whenGetWorkCalled() .thenReturn( buildInput( "work {" + " computation_id: \"computation\"" + " input_data_watermark: 0" + " work {" + " key: \"0000000000000001\"" + " sharding_key: 1" + " work_token: 2" + " cache_token: 1" + " }" + "}", null)); result = server.waitForAndGetCommits(1); commit = result.get(2L); assertThat(commit.getSerializedSize(), isWithinBundleSizeLimits); } @Test public void testLimitOnOutputBundleSizeWithMultipleSinks() throws Exception { List<Integer> finalizeTracker = Lists.newArrayList(); TestCountingSource.setFinalizeTracker(finalizeTracker); final int numMessagesInCustomSourceShard = 100000; final int inflatedSizePerMessage = 10000; List<ParallelInstruction> instructions = new ArrayList<>(); instructions.addAll( makeUnboundedSourcePipeline( numMessagesInCustomSourceShard, new InflateDoFn(inflatedSizePerMessage))); instructions.add( makeSinkInstruction( DEFAULT_DESTINATION_STREAM_ID + "-1", StringUtf8Coder.of(), 1, GlobalWindow.Coder.INSTANCE)); instructions.add( makeSinkInstruction( DEFAULT_DESTINATION_STREAM_ID + "-2", StringUtf8Coder.of(), 1, GlobalWindow.Coder.INSTANCE)); FakeWindmillServer server = new FakeWindmillServer(errorCollector); StreamingDataflowWorker worker = makeWorker(instructions, createTestingPipelineOptions(server), true /* publishCounters */); worker.start(); server .whenGetWorkCalled() .thenReturn( buildInput( "work {" + " computation_id: \"computation\"" + " input_data_watermark: 0" + " work {" + " key: \"0000000000000001\"" + " sharding_key: 1" + " work_token: 1" + " cache_token: 1" + " }" + "}", null)); Matcher<Integer> isWithinBundleSizeLimits = both(greaterThan(StreamingDataflowWorker.MAX_SINK_BYTES * 9 / 10)) .and(lessThan(StreamingDataflowWorker.MAX_SINK_BYTES * 11 / 10)); Map<Long, Windmill.WorkItemCommitRequest> result = server.waitForAndGetCommits(1); Windmill.WorkItemCommitRequest commit = result.get(1L); assertThat(commit.getSerializedSize(), isWithinBundleSizeLimits); server .whenGetWorkCalled() .thenReturn( buildInput( "work {" + " computation_id: \"computation\"" + " input_data_watermark: 0" + " work {" + " key: \"0000000000000001\"" + " sharding_key: 1" + " work_token: 2" + " cache_token: 1" + " }" + "}", null)); result = server.waitForAndGetCommits(1); commit = result.get(2L); assertThat(commit.getSerializedSize(), isWithinBundleSizeLimits); } @Test public void testStuckCommit() throws Exception { if (!streamingEngine) { return; } List<ParallelInstruction> instructions = Arrays.asList( makeSourceInstruction(StringUtf8Coder.of()), makeSinkInstruction(StringUtf8Coder.of(), 0)); FakeWindmillServer server = new FakeWindmillServer(errorCollector); StreamingDataflowWorkerOptions options = createTestingPipelineOptions(server); options.setStuckCommitDurationMillis(2000); StreamingDataflowWorker worker = makeWorker(instructions, options, true /* publishCounters */); worker.start(); server.setDropStreamingCommits(true); server .whenGetWorkCalled() .thenReturn(makeInput(10, TimeUnit.MILLISECONDS.toMicros(2), DEFAULT_KEY_STRING, 1)) .thenReturn(makeInput(15, TimeUnit.MILLISECONDS.toMicros(3), DEFAULT_KEY_STRING, 5)); ConcurrentHashMap<Long, Consumer<CommitStatus>> droppedCommits = server.waitForDroppedCommits(2); server.setDropStreamingCommits(false); server .whenGetWorkCalled() .thenReturn(makeInput(1, TimeUnit.MILLISECONDS.toMicros(1), DEFAULT_KEY_STRING, 1)); Map<Long, Windmill.WorkItemCommitRequest> result 
= server.waitForAndGetCommits(1); droppedCommits.values().iterator().next().accept(CommitStatus.OK); worker.stop(); assertTrue(result.containsKey(1L)); assertEquals( makeExpectedOutput( 1, TimeUnit.MILLISECONDS.toMicros(1), DEFAULT_KEY_STRING, 1, DEFAULT_KEY_STRING) .build(), removeDynamicFields(result.get(1L))); } static class BlockingFn extends DoFn<String, String> implements TestRule { public static CountDownLatch blocker = new CountDownLatch(1); public static Semaphore counter = new Semaphore(0); public static AtomicInteger callCounter = new AtomicInteger(0); @ProcessElement public void processElement(ProcessContext c) throws InterruptedException { callCounter.incrementAndGet(); counter.release(); blocker.await(); c.output(c.element()); } @Override public Statement apply(final Statement base, final Description description) { return new Statement() { @Override public void evaluate() throws Throwable { blocker = new CountDownLatch(1); counter = new Semaphore(0); callCounter = new AtomicInteger(); base.evaluate(); } }; } } static class KeyTokenInvalidFn extends DoFn<KV<String, String>, KV<String, String>> { static boolean thrown = false; @ProcessElement public void processElement(ProcessContext c) { if (!thrown) { thrown = true; throw new KeyTokenInvalidException("key"); } else { c.output(c.element()); } } } static class LargeCommitFn extends DoFn<KV<String, String>, KV<String, String>> { @ProcessElement public void processElement(ProcessContext c) { if (c.element().getKey().equals("large_key")) { StringBuilder s = new StringBuilder(); for (int i = 0; i < 100; ++i) { s.append("large_commit"); } c.output(KV.of(c.element().getKey(), s.toString())); } else { c.output(c.element()); } } } static class ChangeKeysFn extends DoFn<KV<String, String>, KV<String, String>> { @ProcessElement public void processElement(ProcessContext c) { KV<String, String> elem = c.element(); c.output(KV.of(elem.getKey() + "_" + elem.getValue(), elem.getValue())); } } static class TestExceptionFn extends DoFn<String, String> { boolean firstTime = true; @ProcessElement public void processElement(ProcessContext c) throws Exception { if (firstTime) { firstTime = false; try { throw new Exception("Exception!"); } catch (Exception e) { throw new Exception("Another exception!", e); } } } } static class PassthroughDoFn extends DoFn<KV<String, Iterable<String>>, KV<String, Iterable<String>>> { @ProcessElement public void processElement(ProcessContext c) { c.output(c.element()); } } static class Action { GetWorkResponse response; Timer[] expectedTimers = new Timer[] {}; WatermarkHold[] expectedHolds = new WatermarkHold[] {}; public Action(GetWorkResponse response) { this.response = response; } Action withHolds(WatermarkHold... holds) { this.expectedHolds = holds; return this; } Action withTimers(Timer... 
timers) { this.expectedTimers = timers; return this; } } static class PrintFn extends DoFn<ValueWithRecordId<KV<Integer, Integer>>, String> { @ProcessElement public void processElement(ProcessContext c) { KV<Integer, Integer> elem = c.element().getValue(); c.output(elem.getKey() + ":" + elem.getValue()); } } private static class MockWork { Work create(long workToken) { return Work.create( Windmill.WorkItem.newBuilder().setKey(ByteString.EMPTY).setWorkToken(workToken).build(), Instant::now, Collections.emptyList(), work -> {}); } } static class TestExceptionInvalidatesCacheFn extends DoFn<ValueWithRecordId<KV<Integer, Integer>>, String> { static boolean thrown = false; @StateId("int") private final StateSpec<ValueState<Integer>> counter = StateSpecs.value(VarIntCoder.of()); @ProcessElement public void processElement(ProcessContext c, @StateId("int") ValueState<Integer> state) throws Exception { KV<Integer, Integer> elem = c.element().getValue(); if (elem.getValue() == 0) { LOG.error("**** COUNTER 0 ****"); assertNull(state.read()); state.write(42); assertEquals((Integer) 42, state.read()); } else if (elem.getValue() == 1) { LOG.error("**** COUNTER 1 ****"); assertEquals((Integer) 42, state.read()); } else if (elem.getValue() == 2) { if (!thrown) { LOG.error("**** COUNTER 2 (will throw) ****"); thrown = true; throw new Exception("Exception!"); } LOG.error("**** COUNTER 2 (retry) ****"); assertEquals((Integer) 42, state.read()); } else { throw new RuntimeException("only expecting values [0,2]"); } c.output(elem.getKey() + ":" + elem.getValue()); } } private static class FanoutFn extends DoFn<String, String> { @ProcessElement public void processElement(ProcessContext c) { StringBuilder builder = new StringBuilder(1000000); for (int i = 0; i < 1000000; i++) { builder.append(' '); } String largeString = builder.toString(); for (int i = 0; i < 3000; i++) { c.output(largeString); } } } private static class SlowDoFn extends DoFn<String, String> { @ProcessElement public void processElement(ProcessContext c) throws Exception { Thread.sleep(1000); c.output(c.element()); } } static class FakeClock implements Supplier<Instant> { private final PriorityQueue<Job> jobs = new PriorityQueue<>(); private Instant now = Instant.now(); public ScheduledExecutorService newFakeScheduledExecutor(String unused) { return new FakeScheduledExecutor(); } @Override public synchronized Instant get() { return now; } public synchronized void clear() { jobs.clear(); } public synchronized void sleep(Duration duration) { if (duration.isShorterThan(Duration.ZERO)) { throw new UnsupportedOperationException("Cannot sleep backwards in time"); } Instant endOfSleep = now.plus(duration); while (true) { Job job = jobs.peek(); if (job == null || job.when.isAfter(endOfSleep)) { break; } jobs.remove(); now = job.when; job.work.run(); } now = endOfSleep; } private synchronized void schedule(Duration fromNow, Runnable work) { jobs.add(new Job(now.plus(fromNow), work)); } private static class Job implements Comparable<Job> { final Instant when; final Runnable work; Job(Instant when, Runnable work) { this.when = when; this.work = work; } @Override public int compareTo(Job job) { return when.compareTo(job.when); } } private class FakeScheduledExecutor implements ScheduledExecutorService { @Override public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException { return true; } @Override public void execute(Runnable command) { throw new UnsupportedOperationException("Not implemented yet"); } @Override public <T> 
List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks) throws InterruptedException { throw new UnsupportedOperationException("Not implemented yet"); } @Override public <T> List<Future<T>> invokeAll( Collection<? extends Callable<T>> tasks, long timeout, TimeUnit unit) throws InterruptedException { throw new UnsupportedOperationException("Not implemented yet"); } @Override public <T> T invokeAny(Collection<? extends Callable<T>> tasks) throws ExecutionException, InterruptedException { throw new UnsupportedOperationException("Not implemented yet"); } @Override public <T> T invokeAny(Collection<? extends Callable<T>> tasks, long timeout, TimeUnit unit) throws ExecutionException, InterruptedException, TimeoutException { throw new UnsupportedOperationException("Not implemented yet"); } @Override public boolean isShutdown() { throw new UnsupportedOperationException("Not implemented yet"); } @Override public boolean isTerminated() { throw new UnsupportedOperationException("Not implemented yet"); } @Override public void shutdown() {} @Override public List<Runnable> shutdownNow() { throw new UnsupportedOperationException("Not implemented yet"); } @Override public <T> Future<T> submit(Callable<T> task) { throw new UnsupportedOperationException("Not implemented yet"); } @Override public Future<?> submit(Runnable task) { throw new UnsupportedOperationException("Not implemented yet"); } @Override public <T> Future<T> submit(Runnable task, T result) { throw new UnsupportedOperationException("Not implemented yet"); } @Override public <V> ScheduledFuture<V> schedule(Callable<V> callable, long delay, TimeUnit unit) { throw new UnsupportedOperationException("Not implemented yet"); } @Override public ScheduledFuture<?> schedule(Runnable command, long delay, TimeUnit unit) { throw new UnsupportedOperationException("Not implemented yet"); } @Override public ScheduledFuture<?> scheduleAtFixedRate( Runnable command, long initialDelay, long period, TimeUnit unit) { throw new UnsupportedOperationException("Not implemented yet"); } @Override public ScheduledFuture<?> scheduleWithFixedDelay( Runnable command, long initialDelay, long delay, TimeUnit unit) { if (delay <= 0) { throw new UnsupportedOperationException( "Please supply a delay > 0 to scheduleWithFixedDelay"); } FakeClock.this.schedule( Duration.millis(unit.toMillis(initialDelay)), new Runnable() { @Override public void run() { command.run(); FakeClock.this.schedule(Duration.millis(unit.toMillis(delay)), this); } }); FakeClock.this.sleep(Duration.ZERO); return null; } } } private static class FakeSlowDoFn extends DoFn<String, String> { private static FakeClock clock; private final Duration sleep; FakeSlowDoFn(FakeClock clock, Duration sleep) { FakeSlowDoFn.clock = clock; this.sleep = sleep; } @ProcessElement public void processElement(ProcessContext c) throws Exception { clock.sleep(sleep); c.output(c.element()); } } static class ActiveWorkRefreshSink { private final Function<GetDataRequest, GetDataResponse> responder; private final Map<Long, EnumMap<LatencyAttribution.State, Duration>> totalDurations = new HashMap<>(); ActiveWorkRefreshSink(Function<GetDataRequest, GetDataResponse> responder) { this.responder = responder; } Duration getLatencyAttributionDuration(long workToken, LatencyAttribution.State state) { EnumMap<LatencyAttribution.State, Duration> durations = totalDurations.get(workToken); return durations == null ? 
Duration.ZERO : durations.getOrDefault(state, Duration.ZERO); } boolean isActiveWorkRefresh(GetDataRequest request) { for (ComputationGetDataRequest computationRequest : request.getRequestsList()) { if (!computationRequest.getComputationId().equals(DEFAULT_COMPUTATION_ID)) { return false; } for (KeyedGetDataRequest keyedRequest : computationRequest.getRequestsList()) { if (keyedRequest.getWorkToken() == 0 || keyedRequest.getShardingKey() != DEFAULT_SHARDING_KEY || keyedRequest.getValuesToFetchCount() != 0 || keyedRequest.getBagsToFetchCount() != 0 || keyedRequest.getTagValuePrefixesToFetchCount() != 0 || keyedRequest.getWatermarkHoldsToFetchCount() != 0) { return false; } } } return true; } GetDataResponse getData(GetDataRequest request) { if (!isActiveWorkRefresh(request)) { return responder.apply(request); } for (ComputationGetDataRequest computationRequest : request.getRequestsList()) { for (KeyedGetDataRequest keyedRequest : computationRequest.getRequestsList()) { for (LatencyAttribution la : keyedRequest.getLatencyAttributionList()) { EnumMap<LatencyAttribution.State, Duration> durations = totalDurations.computeIfAbsent( keyedRequest.getWorkToken(), (Long workToken) -> new EnumMap<LatencyAttribution.State, Duration>( LatencyAttribution.State.class)); Duration cur = Duration.millis(la.getTotalDurationMillis()); durations.compute(la.getState(), (s, d) -> d == null || d.isShorterThan(cur) ? cur : d); } } } return EMPTY_DATA_RESPONDER.apply(request); } } static class ReadingDoFn extends DoFn<String, String> { @StateId("int") private final StateSpec<ValueState<Integer>> counter = StateSpecs.value(VarIntCoder.of()); @ProcessElement public void processElement(ProcessContext c, @StateId("int") ValueState<Integer> state) { state.read(); c.output(c.element()); } } /** For each input element, emits a large string. */ private static class InflateDoFn extends DoFn<ValueWithRecordId<KV<Integer, Integer>>, String> { final int inflatedSize; /** For each input elements, outputs a string of this length */ InflateDoFn(int inflatedSize) { this.inflatedSize = inflatedSize; } @ProcessElement public void processElement(ProcessContext c) { char[] chars = new char[inflatedSize]; Arrays.fill(chars, ' '); c.output(new String(chars)); } } }
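The FakeClock/FakeScheduledExecutor pair in the context above is a deterministic-time test pattern: scheduled work runs only when the test explicitly advances the clock, so periodic tasks (such as active-work refresh) become reproducible. A minimal, self-contained sketch of the same idea using java.time instead of Joda time (class and method names here are illustrative, not from the Beam codebase):

```java
import java.time.Duration;
import java.time.Instant;
import java.util.PriorityQueue;

// Deterministic test clock: jobs fire only when the test advances time.
class ManualClock {
    private static final class Job implements Comparable<Job> {
        final Instant when;
        final Runnable work;
        Job(Instant when, Runnable work) { this.when = when; this.work = work; }
        @Override public int compareTo(Job o) { return when.compareTo(o.when); }
    }

    private final PriorityQueue<Job> jobs = new PriorityQueue<>();
    private Instant now = Instant.EPOCH;

    synchronized Instant now() { return now; }

    // Enqueue work to run `delay` after the current fake time.
    synchronized void schedule(Duration delay, Runnable work) {
        jobs.add(new Job(now.plus(delay), work));
    }

    // Advance fake time, running every job whose deadline falls inside the window.
    synchronized void sleep(Duration duration) {
        Instant end = now.plus(duration);
        for (Job job = jobs.peek(); job != null && !job.when.isAfter(end); job = jobs.peek()) {
            jobs.remove();
            now = job.when; // time jumps straight to the job's deadline
            job.work.run();
        }
        now = end;
    }

    public static void main(String[] args) {
        ManualClock clock = new ManualClock();
        clock.schedule(Duration.ofSeconds(10), () -> System.out.println("fired at " + clock.now()));
        clock.sleep(Duration.ofSeconds(30)); // prints exactly once, deterministically
    }
}
```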
Thanks for this simplification! Indeed, we only needed to test at the source level; no need for the higher-level stuff (server, pipeline, sending a message).
public void testCheckpointCoderIsSane() { SqsUnboundedSource source = new SqsUnboundedSource(mock(SqsIO.Read.class)); CoderProperties.coderSerializable(source.getCheckpointMarkCoder()); }
SqsUnboundedSource source = new SqsUnboundedSource(mock(SqsIO.Read.class));
public void testCheckpointCoderIsSane() { SqsUnboundedSource source = new SqsUnboundedSource(mock(SqsIO.Read.class)); CoderProperties.coderSerializable(source.getCheckpointMarkCoder()); }
class SqsUnboundedSourceTest { @Test }
class SqsUnboundedSourceTest { @Test }
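For reference, CoderProperties.coderSerializable boils down to asserting that the coder survives a Java-serialization round trip. A hedged sketch of roughly what that check verifies (not Beam's actual implementation):

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;

// Sketch of a serialization round-trip assertion: encode the object with
// ObjectOutputStream, decode it back, and fail loudly on any exception.
final class RoundTrip {
    static <T extends Serializable> T ensureSerializable(T value) {
        try {
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            try (ObjectOutputStream out = new ObjectOutputStream(bytes)) {
                out.writeObject(value);
            }
            try (ObjectInputStream in =
                    new ObjectInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
                @SuppressWarnings("unchecked")
                T copy = (T) in.readObject();
                return copy;
            }
        } catch (Exception e) {
            throw new AssertionError("not serializable: " + value, e);
        }
    }
}
```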
LGTM. I extracted a single method in Write to populate the displayData with parameters to avoid repeating code.
public void populateDisplayData(DisplayData.Builder builder) { super.populateDisplayData(builder); spec.getSpannerConfig().populateDisplayData(builder); builder.add( DisplayData.item("batchSizeBytes", spec.getBatchSizeBytes()) .withLabel("Max batch size in bytes")); builder.add( DisplayData.item("maxNumMutations", spec.getMaxNumMutations()) .withLabel("Max number of mutated cells in each batch")); builder.add( DisplayData.item("maxNumRows", spec.getMaxNumRows()) .withLabel("Max number of rows in each batch")); builder.add( DisplayData.item("groupingFactor", spec.getGroupingFactor()) .withLabel("Number of batches to sort over")); }
builder.add(
public void populateDisplayData(DisplayData.Builder builder) { super.populateDisplayData(builder); populateDisplayDataWithParamaters(builder); }
class Builder { abstract Builder setSpannerConfig(SpannerConfig spannerConfig); abstract Builder setBatchSizeBytes(long batchSizeBytes); abstract Builder setMaxNumMutations(long maxNumMutations); abstract Builder setMaxNumRows(long maxNumRows); abstract Builder setFailureMode(FailureMode failureMode); abstract Builder setSchemaReadySignal(PCollection schemaReadySignal); abstract Builder setGroupingFactor(int groupingFactor); abstract Write build(); }
class Builder { abstract Builder setSpannerConfig(SpannerConfig spannerConfig); abstract Builder setBatchSizeBytes(long batchSizeBytes); abstract Builder setMaxNumMutations(long maxNumMutations); abstract Builder setMaxNumRows(long maxNumRows); abstract Builder setFailureMode(FailureMode failureMode); abstract Builder setSchemaReadySignal(PCollection schemaReadySignal); abstract Builder setGroupingFactor(int groupingFactor); abstract Write build(); }
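The after-code above calls populateDisplayDataWithParamaters(builder) but its body isn't shown. A plausible reconstruction from the before-code (the helper name, including its original spelling, is taken from the call site), which both Write and WriteGrouped can now share:

```java
private void populateDisplayDataWithParamaters(DisplayData.Builder builder) {
    // Shared parameter items, previously duplicated in Write and WriteGrouped.
    builder.add(
        DisplayData.item("batchSizeBytes", spec.getBatchSizeBytes())
            .withLabel("Max batch size in bytes"));
    builder.add(
        DisplayData.item("maxNumMutations", spec.getMaxNumMutations())
            .withLabel("Max number of mutated cells in each batch"));
    builder.add(
        DisplayData.item("maxNumRows", spec.getMaxNumRows())
            .withLabel("Max number of rows in each batch"));
    builder.add(
        DisplayData.item("groupingFactor", spec.getGroupingFactor())
            .withLabel("Number of batches to sort over"));
}
```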
We don't do this anywhere currently; given that this is happening during shutdown, maybe it's best we don't?
public static ExecutorService addShutdownHookSafely(ExecutorService executorService, Duration shutdownTimeout) { if (executorService == null) { return null; } Objects.requireNonNull(shutdownTimeout, "'shutdownTimeout' cannot be null."); if (shutdownTimeout.isZero() || shutdownTimeout.isNegative()) { throw new IllegalArgumentException("'shutdownTimeout' must be a non-zero positive duration."); } long timeoutNanos = shutdownTimeout.toNanos(); Thread shutdownThread = new Thread(() -> { try { executorService.shutdown(); if (!executorService.awaitTermination(timeoutNanos / 2, TimeUnit.NANOSECONDS)) { executorService.shutdownNow(); executorService.awaitTermination(timeoutNanos / 2, TimeUnit.NANOSECONDS); } } catch (InterruptedException e) { Thread.currentThread().interrupt(); executorService.shutdown(); } }); if (shutdownHookAccessHelper) { java.security.AccessController.doPrivileged((java.security.PrivilegedAction<Void>) () -> { Runtime.getRuntime().addShutdownHook(shutdownThread); return null; }); } else { Runtime.getRuntime().addShutdownHook(shutdownThread); } return executorService; }
Thread.currentThread().interrupt();
public static ExecutorService addShutdownHookSafely(ExecutorService executorService, Duration shutdownTimeout) { if (executorService == null) { return null; } Objects.requireNonNull(shutdownTimeout, "'shutdownTimeout' cannot be null."); if (shutdownTimeout.isZero() || shutdownTimeout.isNegative()) { throw new IllegalArgumentException("'shutdownTimeout' must be a non-zero positive duration."); } long timeoutNanos = shutdownTimeout.toNanos(); Thread shutdownThread = new Thread(() -> { try { executorService.shutdown(); if (!executorService.awaitTermination(timeoutNanos / 2, TimeUnit.NANOSECONDS)) { executorService.shutdownNow(); executorService.awaitTermination(timeoutNanos / 2, TimeUnit.NANOSECONDS); } } catch (InterruptedException e) { Thread.currentThread().interrupt(); executorService.shutdown(); } }); if (ShutdownHookAccessHelperHolder.shutdownHookAccessHelper) { java.security.AccessController.doPrivileged((java.security.PrivilegedAction<Void>) () -> { Runtime.getRuntime().addShutdownHook(shutdownThread); return null; }); } else { Runtime.getRuntime().addShutdownHook(shutdownThread); } return executorService; }
class from an array of Objects. * * @param args Array of objects to search through to find the first instance of the given `clazz` type. * @param clazz The type trying to be found. * @param <T> Generic type * @return The first object of the desired type, otherwise null. */ public static <T> T findFirstOfType(Object[] args, Class<T> clazz) { if (isNullOrEmpty(args)) { return null; } for (Object arg : args) { if (clazz.isInstance(arg)) { return clazz.cast(arg); } } return null; }
class from an array of Objects. * * @param args Array of objects to search through to find the first instance of the given `clazz` type. * @param clazz The type trying to be found. * @param <T> Generic type * @return The first object of the desired type, otherwise null. */ public static <T> T findFirstOfType(Object[] args, Class<T> clazz) { if (isNullOrEmpty(args)) { return null; } for (Object arg : args) { if (clazz.isInstance(arg)) { return clazz.cast(arg); } } return null; }
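The highlighted line, Thread.currentThread().interrupt(), is the standard pattern when a shutdown hook catches InterruptedException: re-set the interrupt flag so the status isn't silently swallowed. A minimal, self-contained sketch of the same shutdown-hook shape (pool size and timeout are arbitrary):

```java
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

final class ShutdownHookDemo {
    public static void main(String[] args) {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        Runtime.getRuntime().addShutdownHook(new Thread(() -> {
            pool.shutdown(); // stop accepting new tasks
            try {
                if (!pool.awaitTermination(5, TimeUnit.SECONDS)) {
                    pool.shutdownNow(); // cancel tasks still running
                }
            } catch (InterruptedException e) {
                pool.shutdownNow();
                Thread.currentThread().interrupt(); // preserve interrupt status
            }
        }));
    }
}
```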
Using the name is not a good idea, since the producer may have multiple outputs with the same name.
public Rule build() { return logicalCTEConsumer().thenApply(ctx -> { LogicalCTEConsumer cteConsumer = ctx.root; int refCount = ctx.cascadesContext.cteReferencedCount(cteConsumer.getCteId()); if (ConnectContext.get().getSessionVariable().enablePipelineEngine && refCount > ConnectContext.get().getSessionVariable().inlineCTEReferencedThreshold) { return cteConsumer; } LogicalPlan inlinedPlan = ctx.cascadesContext.findCTEPlanForInline(cteConsumer.getCteId()); List<Slot> inlinedPlanOutput = inlinedPlan.getOutput(); List<Slot> cteConsumerOutput = cteConsumer.getOutput(); List<NamedExpression> projects = new ArrayList<>(); for (Slot inlineSlot : inlinedPlanOutput) { String name = inlineSlot.getName(); for (Slot consumerSlot : cteConsumerOutput) { if (consumerSlot.getName().equals(name)) { Alias alias = new Alias(consumerSlot.getExprId(), inlineSlot, name); projects.add(alias); break; } } } return new LogicalProject<LogicalPlan>(projects, inlinedPlan); }).toRule(RuleType.INLINE_CTE); }
}
public Rule build() { return logicalCTEConsumer().thenApply(ctx -> { LogicalCTEConsumer cteConsumer = ctx.root; int refCount = ctx.cascadesContext.cteReferencedCount(cteConsumer.getCteId()); /* * Currently we only implement CTE materialization on the pipeline engine, and only materialize those CTEs whose * refCount > NereidsRewriter.INLINE_CTE_REFERENCED_THRESHOLD. */ if (ConnectContext.get().getSessionVariable().enablePipelineEngine && ConnectContext.get().getSessionVariable().enableCTEMaterialize && refCount > NereidsRewriter.INLINE_CTE_REFERENCED_THRESHOLD) { return cteConsumer; } LogicalPlan inlinedPlan = ctx.cascadesContext.findCTEPlanForInline(cteConsumer.getCteId()); List<Slot> inlinedPlanOutput = inlinedPlan.getOutput(); List<Slot> cteConsumerOutput = cteConsumer.getOutput(); List<NamedExpression> projects = new ArrayList<>(); for (Slot inlineSlot : inlinedPlanOutput) { String name = inlineSlot.getName(); for (Slot consumerSlot : cteConsumerOutput) { if (consumerSlot.getName().equals(name)) { Alias alias = new Alias(consumerSlot.getExprId(), inlineSlot, name); projects.add(alias); break; } } } return new LogicalProject<LogicalPlan>(projects, inlinedPlan); }).toRule(RuleType.INLINE_CTE); }
class InlineCTE extends OneRewriteRuleFactory { @Override }
class InlineCTE extends OneRewriteRuleFactory { @Override }
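The hazard the review flags is easy to reproduce: if the CTE producer has two outputs with the same name (e.g. `SELECT a AS x, b AS x FROM t`), name-based lookup binds both consumer slots to the first match. A self-contained illustration with stand-in types; positional (or ExprId-keyed) matching is the unambiguous alternative:

```java
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Demonstrates why matching consumer slots to producer slots by name is unsafe.
class NameMatchDemo {
    record Slot(int exprId, String name) {}

    public static void main(String[] args) {
        // CTE producer: SELECT a AS x, b AS x FROM t -- two outputs both named "x".
        List<Slot> producer = List.of(new Slot(1, "x"), new Slot(2, "x"));
        // Consumer references both columns.
        List<Slot> consumer = List.of(new Slot(10, "x"), new Slot(11, "x"));

        // Name-based matching: both consumer slots bind to producer exprId 1. Wrong.
        Map<String, Slot> byName = new HashMap<>();
        for (Slot s : producer) byName.putIfAbsent(s.name(), s);
        consumer.forEach(c -> System.out.println(c.exprId() + " -> " + byName.get(c.name()).exprId()));

        // Positional matching is unambiguous: 10 -> 1, 11 -> 2.
        for (int i = 0; i < consumer.size(); i++) {
            System.out.println(consumer.get(i).exprId() + " -> " + producer.get(i).exprId());
        }
    }
}
```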
Shouldn't this check whether the output contains a BoundStar, rather than only handling the single-star case? What about `select *, c1 from t group by 1`?
private Plan bindAggregate(MatchingContext<LogicalAggregate<Plan>> ctx) { LogicalAggregate<Plan> agg = ctx.root; CascadesContext cascadesContext = ctx.cascadesContext; SimpleExprAnalyzer aggOutputAnalyzer = buildSimpleExprAnalyzer( agg, cascadesContext, agg.children(), true, true); List<NamedExpression> boundAggOutput = aggOutputAnalyzer.analyzeToList(agg.getOutputExpressions()); if (boundAggOutput.size() == 1 && boundAggOutput.get(0) instanceof BoundStar) { BoundStar boundStar = (BoundStar) boundAggOutput.get(0); List<NamedExpression> boundProjections = new ArrayList<>(); boundProjections.addAll(boundStar.getSlots()); boundAggOutput = boundProjections; } Supplier<Scope> aggOutputScopeWithoutAggFun = buildAggOutputScopeWithoutAggFun(boundAggOutput, cascadesContext); List<Expression> boundGroupBy = bindGroupBy( agg, agg.getGroupByExpressions(), boundAggOutput, aggOutputScopeWithoutAggFun, cascadesContext); return agg.withGroupByAndOutput(boundGroupBy, boundAggOutput); }
if (boundAggOutput.size() == 1 && boundAggOutput.get(0) instanceof BoundStar) {
private Plan bindAggregate(MatchingContext<LogicalAggregate<Plan>> ctx) { LogicalAggregate<Plan> agg = ctx.root; CascadesContext cascadesContext = ctx.cascadesContext; SimpleExprAnalyzer aggOutputAnalyzer = buildSimpleExprAnalyzer( agg, cascadesContext, agg.children(), true, true); List<NamedExpression> boundAggOutput = aggOutputAnalyzer.analyzeToList(agg.getOutputExpressions()); List<NamedExpression> boundProjections = new ArrayList<>(boundAggOutput.size()); for (NamedExpression output : boundAggOutput) { if (output instanceof BoundStar) { boundProjections.addAll(((BoundStar) output).getSlots()); } else { boundProjections.add(output); } } Supplier<Scope> aggOutputScopeWithoutAggFun = buildAggOutputScopeWithoutAggFun(boundProjections, cascadesContext); List<Expression> boundGroupBy = bindGroupBy( agg, agg.getGroupByExpressions(), boundProjections, aggOutputScopeWithoutAggFun, cascadesContext); return agg.withGroupByAndOutput(boundGroupBy, boundProjections); }
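The fixed version above expands every BoundStar among the bound outputs instead of special-casing a single-element output, which is exactly what `select *, c1 from t group by 1` needs. A sketch of that flattening with stand-in types (not the planner's real NamedExpression/BoundStar classes):

```java
import java.util.ArrayList;
import java.util.List;

// Expand every star among the outputs, not just a lone star.
class StarExpansionDemo {
    interface Expr {}
    record Col(String name) implements Expr {}
    record Star(List<Col> slots) implements Expr {}

    static List<Col> expand(List<Expr> outputs) {
        List<Col> result = new ArrayList<>();
        for (Expr e : outputs) {
            if (e instanceof Star star) {
                result.addAll(star.slots()); // flatten the star's slots in place
            } else {
                result.add((Col) e);
            }
        }
        return result;
    }

    public static void main(String[] args) {
        // SELECT *, c1 FROM t GROUP BY 1 => outputs [*, c1], where * binds to (a, b, c1).
        List<Expr> outputs = List.of(
                new Star(List.of(new Col("a"), new Col("b"), new Col("c1"))),
                new Col("c1"));
        // Prints [a, b, c1, c1] -- the list GROUP BY ordinals index into.
        System.out.println(expand(outputs));
    }
}
```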
class BindExpression implements AnalysisRuleFactory { public static final Logger LOG = LogManager.getLogger(NereidsPlanner.class); @Override public List<Rule> buildRules() { /* * some rules not only depends on the condition Plan::canBind, for example, * BINDING_FILTER_SLOT need transform 'filter(unix_timestamp() > 100)' to * 'filter(unix_timestamp() > cast(100 as int))'. there is no any unbound expression * in the filter, so the Plan::canBind return false. * * we need `isAppliedRule` to judge whether a plan is applied to a rule, so need convert * the normal rule to `AppliedAwareRule` to read and write the mutable state. */ AppliedAwareRuleCondition ruleCondition = new AppliedAwareRuleCondition() { @Override protected boolean condition(Rule rule, Plan plan) { if (!rule.getPattern().matchRoot(plan)) { return false; } return plan.canBind() || (plan.bound() && !isAppliedRule(rule, plan)); } }; return ImmutableList.of( RuleType.BINDING_PROJECT_SLOT.build( logicalProject().thenApply(this::bindProject) ), RuleType.BINDING_FILTER_SLOT.build( logicalFilter().thenApply(this::bindFilter) ), RuleType.BINDING_USING_JOIN_SLOT.build( usingJoin().thenApply(this::bindUsingJoin) ), RuleType.BINDING_JOIN_SLOT.build( logicalJoin().thenApply(this::bindJoin) ), RuleType.BINDING_AGGREGATE_SLOT.build( logicalAggregate().thenApply(this::bindAggregate) ), RuleType.BINDING_REPEAT_SLOT.build( logicalRepeat().thenApply(this::bindRepeat) ), RuleType.BINDING_SORT_SLOT.build( logicalSort(any().whenNot(SetOperation.class::isInstance)) .thenApply(this::bindSortWithoutSetOperation) ), RuleType.BINDING_SORT_SET_OPERATION_SLOT.build( logicalSort(logicalSetOperation()).thenApply(this::bindSortWithSetOperation) ), RuleType.BINDING_HAVING_SLOT.build( logicalHaving(aggregate()).thenApply(this::bindHavingAggregate) ), RuleType.BINDING_HAVING_SLOT.build( logicalHaving(any().whenNot(Aggregate.class::isInstance)).thenApply(this::bindHaving) ), RuleType.BINDING_INLINE_TABLE_SLOT.build( logicalInlineTable().thenApply(this::bindInlineTable) ), RuleType.BINDING_ONE_ROW_RELATION_SLOT.build( unboundOneRowRelation().thenApply(this::bindOneRowRelation) ), RuleType.BINDING_SET_OPERATION_SLOT.build( logicalSetOperation().when(LogicalSetOperation::canBind).then(this::bindSetOperation) ), RuleType.BINDING_GENERATE_SLOT.build( logicalGenerate().when(AbstractPlan::canBind).thenApply(this::bindGenerate) ), RuleType.BINDING_UNBOUND_TVF_RELATION_FUNCTION.build( unboundTVFRelation().thenApply(this::bindTableValuedFunction) ), RuleType.BINDING_SUBQUERY_ALIAS_SLOT.build( logicalSubQueryAlias().thenApply(this::bindSubqueryAlias) ), RuleType.BINDING_RESULT_SINK.build( unboundResultSink().thenApply(this::bindResultSink) ) ).stream().map(ruleCondition).collect(ImmutableList.toImmutableList()); } private LogicalResultSink<Plan> bindResultSink(MatchingContext<UnboundResultSink<Plan>> ctx) { LogicalSink<Plan> sink = ctx.root; if (ctx.connectContext.getState().isQuery()) { List<NamedExpression> outputExprs = sink.child().getOutput().stream() .map(NamedExpression.class::cast) .collect(ImmutableList.toImmutableList()); return new LogicalResultSink<>(outputExprs, sink.child()); } final ImmutableListMultimap.Builder<ExprId, Integer> exprIdToIndexMapBuilder = ImmutableListMultimap.builder(); List<Slot> childOutput = sink.child().getOutput(); for (int index = 0; index < childOutput.size(); index++) { exprIdToIndexMapBuilder.put(childOutput.get(index).getExprId(), index); } InferPlanOutputAlias aliasInfer = new InferPlanOutputAlias(childOutput); List<NamedExpression> 
output = aliasInfer.infer(sink.child(), exprIdToIndexMapBuilder.build()); return new LogicalResultSink<>(output, sink.child()); } private LogicalSubQueryAlias<Plan> bindSubqueryAlias(MatchingContext<LogicalSubQueryAlias<Plan>> ctx) { LogicalSubQueryAlias<Plan> subQueryAlias = ctx.root; checkSameNameSlot(subQueryAlias.child(0).getOutput(), subQueryAlias.getAlias()); return subQueryAlias; } private LogicalPlan bindGenerate(MatchingContext<LogicalGenerate<Plan>> ctx) { LogicalGenerate<Plan> generate = ctx.root; CascadesContext cascadesContext = ctx.cascadesContext; Builder<Slot> outputSlots = ImmutableList.builder(); Builder<Function> boundGenerators = ImmutableList.builder(); List<Alias> expandAlias = Lists.newArrayList(); SimpleExprAnalyzer analyzer = buildSimpleExprAnalyzer( generate, cascadesContext, generate.children(), true, true); for (int i = 0; i < generate.getGeneratorOutput().size(); i++) { UnboundSlot slot = (UnboundSlot) generate.getGeneratorOutput().get(i); Preconditions.checkState(slot.getNameParts().size() == 2, "the size of nameParts of UnboundSlot in LogicalGenerate must be 2."); Expression boundGenerator = analyzer.analyze(generate.getGenerators().get(i)); if (!(boundGenerator instanceof TableGeneratingFunction)) { throw new AnalysisException(boundGenerator.toSql() + " is not a TableGeneratingFunction"); } Function generator = (Function) boundGenerator; boundGenerators.add(generator); Slot boundSlot = new SlotReference(slot.getNameParts().get(1), generator.getDataType(), generator.nullable(), ImmutableList.of(slot.getNameParts().get(0))); outputSlots.add(boundSlot); if (generate.getExpandColumnAlias() != null && i < generate.getExpandColumnAlias().size() && !CollectionUtils.isEmpty(generate.getExpandColumnAlias().get(i))) { List<StructField> fields = ((StructType) boundSlot.getDataType()).getFields(); for (int idx = 0; idx < fields.size(); ++idx) { expandAlias.add(new Alias(new StructElement( boundSlot, new StringLiteral(fields.get(idx).getName())), generate.getExpandColumnAlias().get(i).get(idx))); } } } LogicalGenerate<Plan> ret = new LogicalGenerate<>( boundGenerators.build(), outputSlots.build(), generate.child()); if (!expandAlias.isEmpty()) { List<NamedExpression> allProjectSlots = generate.child().getOutput().stream() .map(NamedExpression.class::cast) .collect(Collectors.toList()); allProjectSlots.addAll(expandAlias); return new LogicalProject<>(allProjectSlots, ret); } return ret; } private LogicalSetOperation bindSetOperation(LogicalSetOperation setOperation) { if (setOperation.child(0).getOutput().size() != setOperation.child(1).getOutput().size()) { throw new AnalysisException("Operands have unequal number of columns:\n" + "'" + setOperation.child(0).getOutput() + "' has " + setOperation.child(0).getOutput().size() + " column(s)\n" + "'" + setOperation.child(1).getOutput() + "' has " + setOperation.child(1).getOutput().size() + " column(s)"); } if (setOperation.getQualifier() == Qualifier.ALL && (setOperation instanceof LogicalExcept || setOperation instanceof LogicalIntersect)) { throw new AnalysisException("INTERSECT and EXCEPT does not support ALL qualified"); } List<List<NamedExpression>> childrenProjections = setOperation.collectChildrenProjections(); int childrenProjectionSize = childrenProjections.size(); Builder<List<SlotReference>> childrenOutputs = ImmutableList.builderWithExpectedSize(childrenProjectionSize); Builder<Plan> newChildren = ImmutableList.builderWithExpectedSize(childrenProjectionSize); for (int i = 0; i < childrenProjectionSize; i++) { 
Plan newChild; if (childrenProjections.stream().allMatch(SlotReference.class::isInstance)) { newChild = setOperation.child(i); } else { newChild = new LogicalProject<>(childrenProjections.get(i), setOperation.child(i)); } newChildren.add(newChild); childrenOutputs.add((List<SlotReference>) (List) newChild.getOutput()); } setOperation = setOperation.withChildrenAndTheirOutputs(newChildren.build(), childrenOutputs.build()); List<NamedExpression> newOutputs = setOperation.buildNewOutputs(); return setOperation.withNewOutputs(newOutputs); } @NotNull private LogicalOneRowRelation bindOneRowRelation(MatchingContext<UnboundOneRowRelation> ctx) { UnboundOneRowRelation oneRowRelation = ctx.root; CascadesContext cascadesContext = ctx.cascadesContext; SimpleExprAnalyzer analyzer = buildSimpleExprAnalyzer( oneRowRelation, cascadesContext, ImmutableList.of(), true, true); List<NamedExpression> projects = analyzer.analyzeToList(oneRowRelation.getProjects()); return new LogicalOneRowRelation(oneRowRelation.getRelationId(), projects); } private LogicalPlan bindInlineTable(MatchingContext<LogicalInlineTable> ctx) { LogicalInlineTable logicalInlineTable = ctx.root; List<LogicalPlan> relations = Lists.newArrayListWithCapacity(logicalInlineTable.getConstantExprsList().size()); for (int i = 0; i < logicalInlineTable.getConstantExprsList().size(); i++) { for (NamedExpression constantExpr : logicalInlineTable.getConstantExprsList().get(i)) { if (constantExpr instanceof DefaultValueSlot) { throw new AnalysisException("Default expression" + " can't exist in SELECT statement at row " + (i + 1)); } } relations.add(new UnboundOneRowRelation(StatementScopeIdGenerator.newRelationId(), logicalInlineTable.getConstantExprsList().get(i))); } return LogicalPlanBuilder.reduceToLogicalPlanTree(0, relations.size() - 1, relations, Qualifier.ALL); } private LogicalHaving<Plan> bindHaving(MatchingContext<LogicalHaving<Plan>> ctx) { LogicalHaving<Plan> having = ctx.root; Plan childPlan = having.child(); CascadesContext cascadesContext = ctx.cascadesContext; Scope childOutput = toScope(cascadesContext, childPlan.getOutput()); Supplier<Scope> childChildrenOutput = Suppliers.memoize(() -> toScope(cascadesContext, PlanUtils.fastGetChildrenOutputs(childPlan.children())) ); return bindHavingByScopes(having, cascadesContext, childOutput, childChildrenOutput); } private LogicalHaving<Plan> bindHavingAggregate( MatchingContext<LogicalHaving<Aggregate<Plan>>> ctx) { LogicalHaving<Aggregate<Plan>> having = ctx.root; Aggregate<Plan> aggregate = having.child(); CascadesContext cascadesContext = ctx.cascadesContext; Supplier<CustomSlotBinderAnalyzer> bindByAggChild = Suppliers.memoize(() -> { Scope aggChildOutputScope = toScope(cascadesContext, PlanUtils.fastGetChildrenOutputs(aggregate.children())); return (analyzer, unboundSlot) -> analyzer.bindSlotByScope(unboundSlot, aggChildOutputScope); }); Scope aggOutputScope = toScope(cascadesContext, aggregate.getOutput()); Supplier<CustomSlotBinderAnalyzer> bindByGroupByThenAggOutputThenAggChild = Suppliers.memoize(() -> { List<Expression> groupByExprs = aggregate.getGroupByExpressions(); ImmutableList.Builder<Slot> groupBySlots = ImmutableList.builderWithExpectedSize(groupByExprs.size()); for (Expression groupBy : groupByExprs) { if (groupBy instanceof Slot) { groupBySlots.add((Slot) groupBy); } } Scope groupBySlotsScope = toScope(cascadesContext, groupBySlots.build()); return (analyzer, unboundSlot) -> { List<Slot> boundInGroupBy = analyzer.bindSlotByScope(unboundSlot, groupBySlotsScope); if 
(!boundInGroupBy.isEmpty()) { return ImmutableList.of(boundInGroupBy.get(0)); } List<Slot> boundInAggOutput = analyzer.bindSlotByScope(unboundSlot, aggOutputScope); if (!boundInAggOutput.isEmpty()) { return ImmutableList.of(boundInAggOutput.get(0)); } List<? extends Expression> expressions = bindByAggChild.get().bindSlot(analyzer, unboundSlot); return expressions.isEmpty() ? expressions : ImmutableList.of(expressions.get(0)); }; }); FunctionRegistry functionRegistry = cascadesContext.getConnectContext().getEnv().getFunctionRegistry(); ExpressionAnalyzer havingAnalyzer = new ExpressionAnalyzer(having, aggOutputScope, cascadesContext, false, true) { private boolean currentIsInAggregateFunction; @Override public Expression visitAggregateFunction(AggregateFunction aggregateFunction, ExpressionRewriteContext context) { if (!currentIsInAggregateFunction) { currentIsInAggregateFunction = true; try { return super.visitAggregateFunction(aggregateFunction, context); } finally { currentIsInAggregateFunction = false; } } else { return super.visitAggregateFunction(aggregateFunction, context); } } @Override public Expression visitUnboundFunction(UnboundFunction unboundFunction, ExpressionRewriteContext context) { if (!currentIsInAggregateFunction && isAggregateFunction(unboundFunction, functionRegistry)) { currentIsInAggregateFunction = true; try { return super.visitUnboundFunction(unboundFunction, context); } finally { currentIsInAggregateFunction = false; } } else { return super.visitUnboundFunction(unboundFunction, context); } } @Override protected List<? extends Expression> bindSlotByThisScope(UnboundSlot unboundSlot) { if (currentIsInAggregateFunction) { return bindByAggChild.get().bindSlot(this, unboundSlot); } else { return bindByGroupByThenAggOutputThenAggChild.get().bindSlot(this, unboundSlot); } } }; Set<Expression> havingExprs = having.getConjuncts(); ImmutableSet.Builder<Expression> analyzedHaving = ImmutableSet.builderWithExpectedSize(havingExprs.size()); ExpressionRewriteContext rewriteContext = new ExpressionRewriteContext(cascadesContext); for (Expression expression : havingExprs) { analyzedHaving.add(havingAnalyzer.analyze(expression, rewriteContext)); } return new LogicalHaving<>(analyzedHaving.build(), having.child()); } private LogicalHaving<Plan> bindHavingByScopes( LogicalHaving<? extends Plan> having, CascadesContext cascadesContext, Scope defaultScope, Supplier<Scope> backupScope) { Plan child = having.child(); SimpleExprAnalyzer analyzer = buildCustomSlotBinderAnalyzer( having, cascadesContext, defaultScope, false, true, (self, unboundSlot) -> { List<Slot> slots = self.bindSlotByScope(unboundSlot, defaultScope); if (!slots.isEmpty()) { return slots; } return self.bindSlotByScope(unboundSlot, backupScope.get()); }); ImmutableSet.Builder<Expression> boundConjuncts = ImmutableSet.builderWithExpectedSize(having.getConjuncts().size()); for (Expression conjunct : having.getConjuncts()) { conjunct = analyzer.analyze(conjunct); conjunct = TypeCoercionUtils.castIfNotSameType(conjunct, BooleanType.INSTANCE); boundConjuncts.add(conjunct); } checkIfOutputAliasNameDuplicatedForGroupBy(boundConjuncts.build(), child instanceof LogicalProject ? 
((LogicalProject<?>) child).getOutputs() : child.getOutput()); return new LogicalHaving<>(boundConjuncts.build(), having.child()); } private LogicalSort<LogicalSetOperation> bindSortWithSetOperation( MatchingContext<LogicalSort<LogicalSetOperation>> ctx) { LogicalSort<LogicalSetOperation> sort = ctx.root; CascadesContext cascadesContext = ctx.cascadesContext; List<Slot> childOutput = sort.child().getOutput(); SimpleExprAnalyzer analyzer = buildSimpleExprAnalyzer( sort, cascadesContext, sort.children(), true, true); Builder<OrderKey> boundKeys = ImmutableList.builderWithExpectedSize(sort.getOrderKeys().size()); for (OrderKey orderKey : sort.getOrderKeys()) { Expression boundKey = bindWithOrdinal(orderKey.getExpr(), analyzer, childOutput); boundKeys.add(orderKey.withExpression(boundKey)); } return new LogicalSort<>(boundKeys.build(), sort.child()); } private LogicalJoin<Plan, Plan> bindJoin(MatchingContext<LogicalJoin<Plan, Plan>> ctx) { LogicalJoin<Plan, Plan> join = ctx.root; CascadesContext cascadesContext = ctx.cascadesContext; SimpleExprAnalyzer analyzer = buildSimpleExprAnalyzer( join, cascadesContext, join.children(), true, true); Builder<Expression> hashJoinConjuncts = ImmutableList.builderWithExpectedSize( join.getHashJoinConjuncts().size()); for (Expression hashJoinConjunct : join.getHashJoinConjuncts()) { hashJoinConjunct = analyzer.analyze(hashJoinConjunct); hashJoinConjunct = TypeCoercionUtils.castIfNotSameType(hashJoinConjunct, BooleanType.INSTANCE); hashJoinConjuncts.add(hashJoinConjunct); } Builder<Expression> otherJoinConjuncts = ImmutableList.builderWithExpectedSize( join.getOtherJoinConjuncts().size()); for (Expression otherJoinConjunct : join.getOtherJoinConjuncts()) { otherJoinConjunct = analyzer.analyze(otherJoinConjunct); otherJoinConjunct = TypeCoercionUtils.castIfNotSameType(otherJoinConjunct, BooleanType.INSTANCE); otherJoinConjuncts.add(otherJoinConjunct); } return new LogicalJoin<>(join.getJoinType(), hashJoinConjuncts.build(), otherJoinConjuncts.build(), join.getDistributeHint(), join.getMarkJoinSlotReference(), join.children(), null); } private LogicalJoin<Plan, Plan> bindUsingJoin(MatchingContext<UsingJoin<Plan, Plan>> ctx) { UsingJoin<Plan, Plan> using = ctx.root; CascadesContext cascadesContext = ctx.cascadesContext; List<Expression> unboundHashJoinConjunct = using.getHashJoinConjuncts(); List<Slot> leftOutput = Utils.reverseImmutableList(using.left().getOutput()); Scope leftScope = toScope(cascadesContext, ExpressionUtils.distinctSlotByName(leftOutput)); Scope rightScope = toScope(cascadesContext, ExpressionUtils.distinctSlotByName(using.right().getOutput())); ExpressionRewriteContext rewriteContext = new ExpressionRewriteContext(cascadesContext); Builder<Expression> hashEqExprs = ImmutableList.builderWithExpectedSize(unboundHashJoinConjunct.size()); for (Expression usingColumn : unboundHashJoinConjunct) { ExpressionAnalyzer leftExprAnalyzer = new ExpressionAnalyzer( using, leftScope, cascadesContext, true, false); Expression usingLeftSlot = leftExprAnalyzer.analyze(usingColumn, rewriteContext); ExpressionAnalyzer rightExprAnalyzer = new ExpressionAnalyzer( using, rightScope, cascadesContext, true, false); Expression usingRightSlot = rightExprAnalyzer.analyze(usingColumn, rewriteContext); hashEqExprs.add(new EqualTo(usingLeftSlot, usingRightSlot)); } return new LogicalJoin<>( using.getJoinType() == JoinType.CROSS_JOIN ? 
JoinType.INNER_JOIN : using.getJoinType(), hashEqExprs.build(), using.getOtherJoinConjuncts(), using.getDistributeHint(), using.getMarkJoinSlotReference(), using.children(), null); } private Plan bindProject(MatchingContext<LogicalProject<Plan>> ctx) { LogicalProject<Plan> project = ctx.root; CascadesContext cascadesContext = ctx.cascadesContext; SimpleExprAnalyzer analyzer = buildSimpleExprAnalyzer( project, cascadesContext, project.children(), true, true); List<NamedExpression> excepts = project.getExcepts(); Supplier<Set<NamedExpression>> boundExcepts = Suppliers.memoize( () -> analyzer.analyzeToSet(project.getExcepts())); Builder<NamedExpression> boundProjections = ImmutableList.builderWithExpectedSize(project.arity()); StatementContext statementContext = ctx.statementContext; for (Expression expression : project.getProjects()) { Expression expr = analyzer.analyze(expression); if (!(expr instanceof BoundStar)) { boundProjections.add((NamedExpression) expr); } else { BoundStar boundStar = (BoundStar) expr; List<Slot> slots = boundStar.getSlots(); if (!excepts.isEmpty()) { slots = Utils.filterImmutableList(slots, slot -> !boundExcepts.get().contains(slot)); } boundProjections.addAll(slots); List<Slot> slotsForLambda = slots; UnboundStar unboundStar = (UnboundStar) expression; unboundStar.getIndexInSqlString().ifPresent(pair -> { statementContext.addIndexInSqlToString(pair, toSqlWithBackquote(slotsForLambda)); }); } } return project.withProjects(boundProjections.build()); } private Plan bindFilter(MatchingContext<LogicalFilter<Plan>> ctx) { LogicalFilter<Plan> filter = ctx.root; CascadesContext cascadesContext = ctx.cascadesContext; SimpleExprAnalyzer analyzer = buildSimpleExprAnalyzer( filter, cascadesContext, filter.children(), true, true); ImmutableSet.Builder<Expression> boundConjuncts = ImmutableSet.builderWithExpectedSize( filter.getConjuncts().size()); for (Expression conjunct : filter.getConjuncts()) { Expression boundConjunct = analyzer.analyze(conjunct); boundConjunct = TypeCoercionUtils.castIfNotSameType(boundConjunct, BooleanType.INSTANCE); boundConjuncts.add(boundConjunct); } return new LogicalFilter<>(boundConjuncts.build(), filter.child()); } private Plan bindRepeat(MatchingContext<LogicalRepeat<Plan>> ctx) { LogicalRepeat<Plan> repeat = ctx.root; CascadesContext cascadesContext = ctx.cascadesContext; SimpleExprAnalyzer repeatOutputAnalyzer = buildSimpleExprAnalyzer( repeat, cascadesContext, repeat.children(), true, true); List<NamedExpression> boundRepeatOutput = repeatOutputAnalyzer.analyzeToList(repeat.getOutputExpressions()); Supplier<Scope> aggOutputScopeWithoutAggFun = buildAggOutputScopeWithoutAggFun(boundRepeatOutput, cascadesContext); Builder<List<Expression>> boundGroupingSetsBuilder = ImmutableList.builderWithExpectedSize(repeat.getGroupingSets().size()); for (List<Expression> groupingSet : repeat.getGroupingSets()) { List<Expression> boundGroupingSet = bindGroupBy( repeat, groupingSet, boundRepeatOutput, aggOutputScopeWithoutAggFun, cascadesContext); boundGroupingSetsBuilder.add(boundGroupingSet); } List<List<Expression>> boundGroupingSets = boundGroupingSetsBuilder.build(); List<NamedExpression> nullableOutput = PlanUtils.adjustNullableForRepeat(boundGroupingSets, boundRepeatOutput); for (List<Expression> groupingSet : boundGroupingSets) { checkIfOutputAliasNameDuplicatedForGroupBy(groupingSet, nullableOutput); } Set<Slot> groupingExprs = boundGroupingSets.stream() .flatMap(Collection::stream).map(expr -> expr.getInputSlots()) 
.flatMap(Collection::stream).collect(Collectors.toSet()); Set<GroupingScalarFunction> groupingScalarFunctions = ExpressionUtils .collect(nullableOutput, GroupingScalarFunction.class::isInstance); for (GroupingScalarFunction function : groupingScalarFunctions) { if (!groupingExprs.containsAll(function.getInputSlots())) { throw new AnalysisException("Column in " + function.getName() + " does not exist in GROUP BY clause."); } } return repeat.withGroupSetsAndOutput(boundGroupingSets, nullableOutput); } private List<Expression> bindGroupBy( Aggregate<Plan> agg, List<Expression> groupBy, List<NamedExpression> boundAggOutput, Supplier<Scope> aggOutputScopeWithoutAggFun, CascadesContext cascadesContext) { Scope childOutputScope = toScope(cascadesContext, agg.child().getOutput()); SimpleExprAnalyzer analyzer = buildCustomSlotBinderAnalyzer( agg, cascadesContext, childOutputScope, true, true, (self, unboundSlot) -> { List<Slot> slotsInChildren = self.bindExactSlotsByThisScope(unboundSlot, childOutputScope); if (slotsInChildren.size() == 1) { return slotsInChildren; } List<Slot> slotsInOutput = self.bindExactSlotsByThisScope( unboundSlot, aggOutputScopeWithoutAggFun.get()); if (slotsInOutput.isEmpty()) { return slotsInChildren; } Builder<Expression> useOutputExpr = ImmutableList.builderWithExpectedSize(slotsInOutput.size()); for (Slot slotInOutput : slotsInOutput) { MappingSlot mappingSlot = (MappingSlot) slotInOutput; useOutputExpr.add(mappingSlot.getMappingExpression()); } return useOutputExpr.build(); }); ImmutableList.Builder<Expression> boundGroupByBuilder = ImmutableList.builderWithExpectedSize(groupBy.size()); for (Expression key : groupBy) { boundGroupByBuilder.add(bindWithOrdinal(key, analyzer, boundAggOutput)); } List<Expression> boundGroupBy = boundGroupByBuilder.build(); checkIfOutputAliasNameDuplicatedForGroupBy(boundGroupBy, boundAggOutput); return boundGroupBy; } private Supplier<Scope> buildAggOutputScopeWithoutAggFun( List<? extends NamedExpression> boundAggOutput, CascadesContext cascadesContext) { return Suppliers.memoize(() -> { Builder<MappingSlot> nonAggFunOutput = ImmutableList.builderWithExpectedSize(boundAggOutput.size()); for (NamedExpression output : boundAggOutput) { if (!output.containsType(AggregateFunction.class)) { Slot outputSlot = output.toSlot(); MappingSlot mappingSlot = new MappingSlot(outputSlot, output instanceof Alias ? 
output.child(0) : output); nonAggFunOutput.add(mappingSlot); } } return toScope(cascadesContext, nonAggFunOutput.build()); }); } private Plan bindSortWithoutSetOperation(MatchingContext<LogicalSort<Plan>> ctx) { CascadesContext cascadesContext = ctx.cascadesContext; LogicalSort<Plan> sort = ctx.root; Plan input = sort.child(); List<Slot> childOutput = input.getOutput(); if (input instanceof LogicalProject && ((LogicalProject<?>) input).isDistinct() && (input.child(0) instanceof LogicalHaving || input.child(0) instanceof LogicalAggregate || input.child(0) instanceof LogicalRepeat)) { input = input.child(0); } if (input instanceof LogicalHaving) { input = input.child(0); } List<Slot> inputSlots = input.getOutput(); Scope inputScope = toScope(cascadesContext, inputSlots); final Plan finalInput = input; Supplier<Scope> inputChildrenScope = Suppliers.memoize( () -> toScope(cascadesContext, PlanUtils.fastGetChildrenOutputs(finalInput.children()))); SimpleExprAnalyzer bindInInputScopeThenInputChildScope = buildCustomSlotBinderAnalyzer( sort, cascadesContext, inputScope, true, false, (self, unboundSlot) -> { List<Slot> slotsInInput = self.bindExactSlotsByThisScope(unboundSlot, inputScope); if (!slotsInInput.isEmpty()) { return ImmutableList.of(slotsInInput.get(0)); } return self.bindExactSlotsByThisScope(unboundSlot, inputChildrenScope.get()); }); SimpleExprAnalyzer bindInInputChildScope = getAnalyzerForOrderByAggFunc(finalInput, cascadesContext, sort, inputChildrenScope, inputScope); Builder<OrderKey> boundOrderKeys = ImmutableList.builderWithExpectedSize(sort.getOrderKeys().size()); FunctionRegistry functionRegistry = cascadesContext.getConnectContext().getEnv().getFunctionRegistry(); for (OrderKey orderKey : sort.getOrderKeys()) { Expression boundKey; if (hasAggregateFunction(orderKey.getExpr(), functionRegistry)) { boundKey = bindInInputChildScope.analyze(orderKey.getExpr()); } else { boundKey = bindWithOrdinal(orderKey.getExpr(), bindInInputScopeThenInputChildScope, childOutput); } boundOrderKeys.add(orderKey.withExpression(boundKey)); } return new LogicalSort<>(boundOrderKeys.build(), sort.child()); } private LogicalTVFRelation bindTableValuedFunction(MatchingContext<UnboundTVFRelation> ctx) { UnboundTVFRelation unboundTVFRelation = ctx.root; StatementContext statementContext = ctx.statementContext; Env env = statementContext.getConnectContext().getEnv(); FunctionRegistry functionRegistry = env.getFunctionRegistry(); String functionName = unboundTVFRelation.getFunctionName(); Properties arguments = unboundTVFRelation.getProperties(); FunctionBuilder functionBuilder = functionRegistry.findFunctionBuilder(functionName, arguments); Pair<? extends Expression, ? 
extends BoundFunction> bindResult = functionBuilder.build(functionName, arguments); if (!(bindResult.first instanceof TableValuedFunction)) { throw new AnalysisException(bindResult.first.toSql() + " is not a TableValuedFunction"); } Optional<SqlCacheContext> sqlCacheContext = statementContext.getSqlCacheContext(); if (sqlCacheContext.isPresent()) { sqlCacheContext.get().setCannotProcessExpression(true); } return new LogicalTVFRelation(unboundTVFRelation.getRelationId(), (TableValuedFunction) bindResult.first); } private void checkSameNameSlot(List<Slot> childOutputs, String subQueryAlias) { Set<String> nameSlots = new HashSet<>(childOutputs.size() * 2); for (Slot s : childOutputs) { if (!nameSlots.add(s.getInternalName())) { throw new AnalysisException("Duplicated inline view column alias: '" + s.getName() + "'" + " in inline view: '" + subQueryAlias + "'"); } } } private void checkIfOutputAliasNameDuplicatedForGroupBy(Collection<Expression> expressions, List<? extends NamedExpression> output) { if (output.stream().noneMatch(Alias.class::isInstance)) { return; } List<Alias> aliasList = ExpressionUtils.filter(output, Alias.class); List<NamedExpression> exprAliasList = ExpressionUtils.collectAll(expressions, NamedExpression.class::isInstance); boolean isGroupByContainAlias = false; for (NamedExpression ne : exprAliasList) { for (Alias alias : aliasList) { if (!alias.getExprId().equals(ne.getExprId()) && alias.getName().equalsIgnoreCase(ne.getName())) { isGroupByContainAlias = true; } } if (isGroupByContainAlias) { break; } } if (isGroupByContainAlias && ConnectContext.get() != null && ConnectContext.get().getSessionVariable().isGroupByAndHavingUseAliasFirst()) { throw new AnalysisException("group_by_and_having_use_alias=true is unsupported for Nereids"); } } private boolean isAggregateFunction(UnboundFunction unboundFunction, FunctionRegistry functionRegistry) { return functionRegistry.isAggregateFunction( unboundFunction.getDbName(), unboundFunction.getName()); } private Expression bindWithOrdinal( Expression unbound, SimpleExprAnalyzer analyzer, List<? extends Expression> boundSelectOutput) { if (unbound instanceof IntegerLikeLiteral) { int ordinal = ((IntegerLikeLiteral) unbound).getIntValue(); if (ordinal >= 1 && ordinal <= boundSelectOutput.size()) { Expression boundSelectItem = boundSelectOutput.get(ordinal - 1); return boundSelectItem instanceof Alias ? boundSelectItem.child(0) : boundSelectItem; } else { return unbound; } } else { return analyzer.analyze(unbound); } } private <E extends Expression> E checkBoundExceptLambda(E expression, Plan plan) { if (expression instanceof Lambda) { return expression; } if (expression instanceof UnboundSlot) { UnboundSlot unboundSlot = (UnboundSlot) expression; String tableName = StringUtils.join(unboundSlot.getQualifier(), "."); if (tableName.isEmpty()) { tableName = "table list"; } throw new AnalysisException("Unknown column '" + unboundSlot.getNameParts().get(unboundSlot.getNameParts().size() - 1) + "' in '" + tableName + "' in " + plan.getType().toString().substring("LOGICAL_".length()) + " clause"); } expression.children().forEach(e -> checkBoundExceptLambda(e, plan)); return expression; } private Scope toScope(CascadesContext cascadesContext, List<? 
extends Slot> slots) { Optional<Scope> outerScope = cascadesContext.getOuterScope(); if (outerScope.isPresent()) { return new Scope(outerScope, slots, outerScope.get().getSubquery()); } else { return new Scope(slots); } } private SimpleExprAnalyzer buildSimpleExprAnalyzer( Plan currentPlan, CascadesContext cascadesContext, List<Plan> children, boolean enableExactMatch, boolean bindSlotInOuterScope) { List<Slot> childrenOutputs = PlanUtils.fastGetChildrenOutputs(children); Scope scope = toScope(cascadesContext, childrenOutputs); return buildSimpleExprAnalyzer(currentPlan, cascadesContext, scope, enableExactMatch, bindSlotInOuterScope); } private SimpleExprAnalyzer buildSimpleExprAnalyzer( Plan currentPlan, CascadesContext cascadesContext, Scope scope, boolean enableExactMatch, boolean bindSlotInOuterScope) { ExpressionRewriteContext rewriteContext = new ExpressionRewriteContext(cascadesContext); ExpressionAnalyzer expressionAnalyzer = new ExpressionAnalyzer(currentPlan, scope, cascadesContext, enableExactMatch, bindSlotInOuterScope); return expr -> expressionAnalyzer.analyze(expr, rewriteContext); } private SimpleExprAnalyzer buildCustomSlotBinderAnalyzer( Plan currentPlan, CascadesContext cascadesContext, Scope defaultScope, boolean enableExactMatch, boolean bindSlotInOuterScope, CustomSlotBinderAnalyzer customSlotBinder) { ExpressionRewriteContext rewriteContext = new ExpressionRewriteContext(cascadesContext); ExpressionAnalyzer expressionAnalyzer = new ExpressionAnalyzer(currentPlan, defaultScope, cascadesContext, enableExactMatch, bindSlotInOuterScope) { @Override protected List<? extends Expression> bindSlotByThisScope(UnboundSlot unboundSlot) { return customSlotBinder.bindSlot(this, unboundSlot); } }; return expr -> expressionAnalyzer.analyze(expr, rewriteContext); } private interface SimpleExprAnalyzer { Expression analyze(Expression expr); default <E extends Expression> List<E> analyzeToList(List<E> exprs) { ImmutableList.Builder<E> result = ImmutableList.builderWithExpectedSize(exprs.size()); for (E expr : exprs) { result.add((E) analyze(expr)); } return result.build(); } default <E extends Expression> Set<E> analyzeToSet(List<E> exprs) { ImmutableSet.Builder<E> result = ImmutableSet.builderWithExpectedSize(exprs.size() * 2); for (E expr : exprs) { result.add((E) analyze(expr)); } return result.build(); } } private interface CustomSlotBinderAnalyzer { List<? 
extends Expression> bindSlot(ExpressionAnalyzer analyzer, UnboundSlot unboundSlot); } public String toSqlWithBackquote(List<Slot> slots) { return slots.stream().map(slot -> ((SlotReference) slot).getQualifiedNameWithBackquote()) .collect(Collectors.joining(", ")); } private boolean hasAggregateFunction(Expression expression, FunctionRegistry functionRegistry) { return expression.anyMatch(expr -> { if (expr instanceof AggregateFunction) { return true; } else if (expr instanceof UnboundFunction) { UnboundFunction unboundFunction = (UnboundFunction) expr; boolean isAggregateFunction = functionRegistry .isAggregateFunction( unboundFunction.getDbName(), unboundFunction.getName() ); return isAggregateFunction; } return false; }); } private SimpleExprAnalyzer getAnalyzerForOrderByAggFunc(Plan finalInput, CascadesContext cascadesContext, LogicalSort<Plan> sort, Supplier<Scope> inputChildrenScope, Scope inputScope) { ImmutableList.Builder<Slot> outputSlots = ImmutableList.builder(); if (finalInput instanceof LogicalAggregate) { LogicalAggregate<Plan> aggregate = (LogicalAggregate<Plan>) finalInput; List<NamedExpression> outputExpressions = aggregate.getOutputExpressions(); for (NamedExpression outputExpr : outputExpressions) { if (!outputExpr.anyMatch(expr -> expr instanceof AggregateFunction)) { outputSlots.add(outputExpr.toSlot()); } } } Scope outputWithoutAggFunc = toScope(cascadesContext, outputSlots.build()); SimpleExprAnalyzer bindInInputChildScope = buildCustomSlotBinderAnalyzer( sort, cascadesContext, inputScope, true, false, (analyzer, unboundSlot) -> { if (finalInput instanceof LogicalAggregate) { List<Slot> boundInOutputWithoutAggFunc = analyzer.bindSlotByScope(unboundSlot, outputWithoutAggFunc); if (!boundInOutputWithoutAggFunc.isEmpty()) { return ImmutableList.of(boundInOutputWithoutAggFunc.get(0)); } } return analyzer.bindExactSlotsByThisScope(unboundSlot, inputChildrenScope.get()); }); return bindInInputChildScope; } }
class BindExpression implements AnalysisRuleFactory { public static final Logger LOG = LogManager.getLogger(NereidsPlanner.class); @Override public List<Rule> buildRules() { /* * some rules not only depends on the condition Plan::canBind, for example, * BINDING_FILTER_SLOT need transform 'filter(unix_timestamp() > 100)' to * 'filter(unix_timestamp() > cast(100 as int))'. there is no any unbound expression * in the filter, so the Plan::canBind return false. * * we need `isAppliedRule` to judge whether a plan is applied to a rule, so need convert * the normal rule to `AppliedAwareRule` to read and write the mutable state. */ AppliedAwareRuleCondition ruleCondition = new AppliedAwareRuleCondition() { @Override protected boolean condition(Rule rule, Plan plan) { if (!rule.getPattern().matchRoot(plan)) { return false; } return plan.canBind() || (plan.bound() && !isAppliedRule(rule, plan)); } }; return ImmutableList.of( RuleType.BINDING_PROJECT_SLOT.build( logicalProject().thenApply(this::bindProject) ), RuleType.BINDING_FILTER_SLOT.build( logicalFilter().thenApply(this::bindFilter) ), RuleType.BINDING_USING_JOIN_SLOT.build( usingJoin().thenApply(this::bindUsingJoin) ), RuleType.BINDING_JOIN_SLOT.build( logicalJoin().thenApply(this::bindJoin) ), RuleType.BINDING_AGGREGATE_SLOT.build( logicalAggregate().thenApply(this::bindAggregate) ), RuleType.BINDING_REPEAT_SLOT.build( logicalRepeat().thenApply(this::bindRepeat) ), RuleType.BINDING_SORT_SLOT.build( logicalSort(any().whenNot(SetOperation.class::isInstance)) .thenApply(this::bindSortWithoutSetOperation) ), RuleType.BINDING_SORT_SET_OPERATION_SLOT.build( logicalSort(logicalSetOperation()).thenApply(this::bindSortWithSetOperation) ), RuleType.BINDING_HAVING_SLOT.build( logicalHaving(aggregate()).thenApply(this::bindHavingAggregate) ), RuleType.BINDING_HAVING_SLOT.build( logicalHaving(any().whenNot(Aggregate.class::isInstance)).thenApply(this::bindHaving) ), RuleType.BINDING_INLINE_TABLE_SLOT.build( logicalInlineTable().thenApply(this::bindInlineTable) ), RuleType.BINDING_ONE_ROW_RELATION_SLOT.build( unboundOneRowRelation().thenApply(this::bindOneRowRelation) ), RuleType.BINDING_SET_OPERATION_SLOT.build( logicalSetOperation().when(LogicalSetOperation::canBind).then(this::bindSetOperation) ), RuleType.BINDING_GENERATE_SLOT.build( logicalGenerate().when(AbstractPlan::canBind).thenApply(this::bindGenerate) ), RuleType.BINDING_UNBOUND_TVF_RELATION_FUNCTION.build( unboundTVFRelation().thenApply(this::bindTableValuedFunction) ), RuleType.BINDING_SUBQUERY_ALIAS_SLOT.build( logicalSubQueryAlias().thenApply(this::bindSubqueryAlias) ), RuleType.BINDING_RESULT_SINK.build( unboundResultSink().thenApply(this::bindResultSink) ) ).stream().map(ruleCondition).collect(ImmutableList.toImmutableList()); } private LogicalResultSink<Plan> bindResultSink(MatchingContext<UnboundResultSink<Plan>> ctx) { LogicalSink<Plan> sink = ctx.root; if (ctx.connectContext.getState().isQuery()) { List<NamedExpression> outputExprs = sink.child().getOutput().stream() .map(NamedExpression.class::cast) .collect(ImmutableList.toImmutableList()); return new LogicalResultSink<>(outputExprs, sink.child()); } final ImmutableListMultimap.Builder<ExprId, Integer> exprIdToIndexMapBuilder = ImmutableListMultimap.builder(); List<Slot> childOutput = sink.child().getOutput(); for (int index = 0; index < childOutput.size(); index++) { exprIdToIndexMapBuilder.put(childOutput.get(index).getExprId(), index); } InferPlanOutputAlias aliasInfer = new InferPlanOutputAlias(childOutput); List<NamedExpression> 
output = aliasInfer.infer(sink.child(), exprIdToIndexMapBuilder.build()); return new LogicalResultSink<>(output, sink.child()); } private LogicalSubQueryAlias<Plan> bindSubqueryAlias(MatchingContext<LogicalSubQueryAlias<Plan>> ctx) { LogicalSubQueryAlias<Plan> subQueryAlias = ctx.root; checkSameNameSlot(subQueryAlias.child(0).getOutput(), subQueryAlias.getAlias()); return subQueryAlias; } private LogicalPlan bindGenerate(MatchingContext<LogicalGenerate<Plan>> ctx) { LogicalGenerate<Plan> generate = ctx.root; CascadesContext cascadesContext = ctx.cascadesContext; Builder<Slot> outputSlots = ImmutableList.builder(); Builder<Function> boundGenerators = ImmutableList.builder(); List<Alias> expandAlias = Lists.newArrayList(); SimpleExprAnalyzer analyzer = buildSimpleExprAnalyzer( generate, cascadesContext, generate.children(), true, true); for (int i = 0; i < generate.getGeneratorOutput().size(); i++) { UnboundSlot slot = (UnboundSlot) generate.getGeneratorOutput().get(i); Preconditions.checkState(slot.getNameParts().size() == 2, "the size of nameParts of UnboundSlot in LogicalGenerate must be 2."); Expression boundGenerator = analyzer.analyze(generate.getGenerators().get(i)); if (!(boundGenerator instanceof TableGeneratingFunction)) { throw new AnalysisException(boundGenerator.toSql() + " is not a TableGeneratingFunction"); } Function generator = (Function) boundGenerator; boundGenerators.add(generator); Slot boundSlot = new SlotReference(slot.getNameParts().get(1), generator.getDataType(), generator.nullable(), ImmutableList.of(slot.getNameParts().get(0))); outputSlots.add(boundSlot); if (generate.getExpandColumnAlias() != null && i < generate.getExpandColumnAlias().size() && !CollectionUtils.isEmpty(generate.getExpandColumnAlias().get(i))) { List<StructField> fields = ((StructType) boundSlot.getDataType()).getFields(); for (int idx = 0; idx < fields.size(); ++idx) { expandAlias.add(new Alias(new StructElement( boundSlot, new StringLiteral(fields.get(idx).getName())), generate.getExpandColumnAlias().get(i).get(idx))); } } } LogicalGenerate<Plan> ret = new LogicalGenerate<>( boundGenerators.build(), outputSlots.build(), generate.child()); if (!expandAlias.isEmpty()) { List<NamedExpression> allProjectSlots = generate.child().getOutput().stream() .map(NamedExpression.class::cast) .collect(Collectors.toList()); allProjectSlots.addAll(expandAlias); return new LogicalProject<>(allProjectSlots, ret); } return ret; } private LogicalSetOperation bindSetOperation(LogicalSetOperation setOperation) { if (setOperation.child(0).getOutput().size() != setOperation.child(1).getOutput().size()) { throw new AnalysisException("Operands have unequal number of columns:\n" + "'" + setOperation.child(0).getOutput() + "' has " + setOperation.child(0).getOutput().size() + " column(s)\n" + "'" + setOperation.child(1).getOutput() + "' has " + setOperation.child(1).getOutput().size() + " column(s)"); } if (setOperation.getQualifier() == Qualifier.ALL && (setOperation instanceof LogicalExcept || setOperation instanceof LogicalIntersect)) { throw new AnalysisException("INTERSECT and EXCEPT does not support ALL qualified"); } List<List<NamedExpression>> childrenProjections = setOperation.collectChildrenProjections(); int childrenProjectionSize = childrenProjections.size(); Builder<List<SlotReference>> childrenOutputs = ImmutableList.builderWithExpectedSize(childrenProjectionSize); Builder<Plan> newChildren = ImmutableList.builderWithExpectedSize(childrenProjectionSize); for (int i = 0; i < childrenProjectionSize; i++) { 
Plan newChild; if (childrenProjections.stream().allMatch(SlotReference.class::isInstance)) { newChild = setOperation.child(i); } else { newChild = new LogicalProject<>(childrenProjections.get(i), setOperation.child(i)); } newChildren.add(newChild); childrenOutputs.add((List<SlotReference>) (List) newChild.getOutput()); } setOperation = setOperation.withChildrenAndTheirOutputs(newChildren.build(), childrenOutputs.build()); List<NamedExpression> newOutputs = setOperation.buildNewOutputs(); return setOperation.withNewOutputs(newOutputs); } @NotNull private LogicalOneRowRelation bindOneRowRelation(MatchingContext<UnboundOneRowRelation> ctx) { UnboundOneRowRelation oneRowRelation = ctx.root; CascadesContext cascadesContext = ctx.cascadesContext; SimpleExprAnalyzer analyzer = buildSimpleExprAnalyzer( oneRowRelation, cascadesContext, ImmutableList.of(), true, true); List<NamedExpression> projects = analyzer.analyzeToList(oneRowRelation.getProjects()); return new LogicalOneRowRelation(oneRowRelation.getRelationId(), projects); } private LogicalPlan bindInlineTable(MatchingContext<LogicalInlineTable> ctx) { LogicalInlineTable logicalInlineTable = ctx.root; List<LogicalPlan> relations = Lists.newArrayListWithCapacity(logicalInlineTable.getConstantExprsList().size()); for (int i = 0; i < logicalInlineTable.getConstantExprsList().size(); i++) { for (NamedExpression constantExpr : logicalInlineTable.getConstantExprsList().get(i)) { if (constantExpr instanceof DefaultValueSlot) { throw new AnalysisException("Default expression" + " can't exist in SELECT statement at row " + (i + 1)); } } relations.add(new UnboundOneRowRelation(StatementScopeIdGenerator.newRelationId(), logicalInlineTable.getConstantExprsList().get(i))); } return LogicalPlanBuilder.reduceToLogicalPlanTree(0, relations.size() - 1, relations, Qualifier.ALL); } private LogicalHaving<Plan> bindHaving(MatchingContext<LogicalHaving<Plan>> ctx) { LogicalHaving<Plan> having = ctx.root; Plan childPlan = having.child(); CascadesContext cascadesContext = ctx.cascadesContext; Scope childOutput = toScope(cascadesContext, childPlan.getOutput()); Supplier<Scope> childChildrenOutput = Suppliers.memoize(() -> toScope(cascadesContext, PlanUtils.fastGetChildrenOutputs(childPlan.children())) ); return bindHavingByScopes(having, cascadesContext, childOutput, childChildrenOutput); } private LogicalHaving<Plan> bindHavingAggregate( MatchingContext<LogicalHaving<Aggregate<Plan>>> ctx) { LogicalHaving<Aggregate<Plan>> having = ctx.root; Aggregate<Plan> aggregate = having.child(); CascadesContext cascadesContext = ctx.cascadesContext; Supplier<CustomSlotBinderAnalyzer> bindByAggChild = Suppliers.memoize(() -> { Scope aggChildOutputScope = toScope(cascadesContext, PlanUtils.fastGetChildrenOutputs(aggregate.children())); return (analyzer, unboundSlot) -> analyzer.bindSlotByScope(unboundSlot, aggChildOutputScope); }); Scope aggOutputScope = toScope(cascadesContext, aggregate.getOutput()); Supplier<CustomSlotBinderAnalyzer> bindByGroupByThenAggOutputThenAggChild = Suppliers.memoize(() -> { List<Expression> groupByExprs = aggregate.getGroupByExpressions(); ImmutableList.Builder<Slot> groupBySlots = ImmutableList.builderWithExpectedSize(groupByExprs.size()); for (Expression groupBy : groupByExprs) { if (groupBy instanceof Slot) { groupBySlots.add((Slot) groupBy); } } Scope groupBySlotsScope = toScope(cascadesContext, groupBySlots.build()); return (analyzer, unboundSlot) -> { List<Slot> boundInGroupBy = analyzer.bindSlotByScope(unboundSlot, groupBySlotsScope); if 
(!boundInGroupBy.isEmpty()) { return ImmutableList.of(boundInGroupBy.get(0)); } List<Slot> boundInAggOutput = analyzer.bindSlotByScope(unboundSlot, aggOutputScope); if (!boundInAggOutput.isEmpty()) { return ImmutableList.of(boundInAggOutput.get(0)); } List<? extends Expression> expressions = bindByAggChild.get().bindSlot(analyzer, unboundSlot); return expressions.isEmpty() ? expressions : ImmutableList.of(expressions.get(0)); }; }); FunctionRegistry functionRegistry = cascadesContext.getConnectContext().getEnv().getFunctionRegistry(); ExpressionAnalyzer havingAnalyzer = new ExpressionAnalyzer(having, aggOutputScope, cascadesContext, false, true) { private boolean currentIsInAggregateFunction; @Override public Expression visitAggregateFunction(AggregateFunction aggregateFunction, ExpressionRewriteContext context) { if (!currentIsInAggregateFunction) { currentIsInAggregateFunction = true; try { return super.visitAggregateFunction(aggregateFunction, context); } finally { currentIsInAggregateFunction = false; } } else { return super.visitAggregateFunction(aggregateFunction, context); } } @Override public Expression visitUnboundFunction(UnboundFunction unboundFunction, ExpressionRewriteContext context) { if (!currentIsInAggregateFunction && isAggregateFunction(unboundFunction, functionRegistry)) { currentIsInAggregateFunction = true; try { return super.visitUnboundFunction(unboundFunction, context); } finally { currentIsInAggregateFunction = false; } } else { return super.visitUnboundFunction(unboundFunction, context); } } @Override protected List<? extends Expression> bindSlotByThisScope(UnboundSlot unboundSlot) { if (currentIsInAggregateFunction) { return bindByAggChild.get().bindSlot(this, unboundSlot); } else { return bindByGroupByThenAggOutputThenAggChild.get().bindSlot(this, unboundSlot); } } }; Set<Expression> havingExprs = having.getConjuncts(); ImmutableSet.Builder<Expression> analyzedHaving = ImmutableSet.builderWithExpectedSize(havingExprs.size()); ExpressionRewriteContext rewriteContext = new ExpressionRewriteContext(cascadesContext); for (Expression expression : havingExprs) { analyzedHaving.add(havingAnalyzer.analyze(expression, rewriteContext)); } return new LogicalHaving<>(analyzedHaving.build(), having.child()); } private LogicalHaving<Plan> bindHavingByScopes( LogicalHaving<? extends Plan> having, CascadesContext cascadesContext, Scope defaultScope, Supplier<Scope> backupScope) { Plan child = having.child(); SimpleExprAnalyzer analyzer = buildCustomSlotBinderAnalyzer( having, cascadesContext, defaultScope, false, true, (self, unboundSlot) -> { List<Slot> slots = self.bindSlotByScope(unboundSlot, defaultScope); if (!slots.isEmpty()) { return slots; } return self.bindSlotByScope(unboundSlot, backupScope.get()); }); ImmutableSet.Builder<Expression> boundConjuncts = ImmutableSet.builderWithExpectedSize(having.getConjuncts().size()); for (Expression conjunct : having.getConjuncts()) { conjunct = analyzer.analyze(conjunct); conjunct = TypeCoercionUtils.castIfNotSameType(conjunct, BooleanType.INSTANCE); boundConjuncts.add(conjunct); } checkIfOutputAliasNameDuplicatedForGroupBy(boundConjuncts.build(), child instanceof LogicalProject ? 
((LogicalProject<?>) child).getOutputs() : child.getOutput()); return new LogicalHaving<>(boundConjuncts.build(), having.child()); } private LogicalSort<LogicalSetOperation> bindSortWithSetOperation( MatchingContext<LogicalSort<LogicalSetOperation>> ctx) { LogicalSort<LogicalSetOperation> sort = ctx.root; CascadesContext cascadesContext = ctx.cascadesContext; List<Slot> childOutput = sort.child().getOutput(); SimpleExprAnalyzer analyzer = buildSimpleExprAnalyzer( sort, cascadesContext, sort.children(), true, true); Builder<OrderKey> boundKeys = ImmutableList.builderWithExpectedSize(sort.getOrderKeys().size()); for (OrderKey orderKey : sort.getOrderKeys()) { Expression boundKey = bindWithOrdinal(orderKey.getExpr(), analyzer, childOutput); boundKeys.add(orderKey.withExpression(boundKey)); } return new LogicalSort<>(boundKeys.build(), sort.child()); } private LogicalJoin<Plan, Plan> bindJoin(MatchingContext<LogicalJoin<Plan, Plan>> ctx) { LogicalJoin<Plan, Plan> join = ctx.root; CascadesContext cascadesContext = ctx.cascadesContext; SimpleExprAnalyzer analyzer = buildSimpleExprAnalyzer( join, cascadesContext, join.children(), true, true); Builder<Expression> hashJoinConjuncts = ImmutableList.builderWithExpectedSize( join.getHashJoinConjuncts().size()); for (Expression hashJoinConjunct : join.getHashJoinConjuncts()) { hashJoinConjunct = analyzer.analyze(hashJoinConjunct); hashJoinConjunct = TypeCoercionUtils.castIfNotSameType(hashJoinConjunct, BooleanType.INSTANCE); hashJoinConjuncts.add(hashJoinConjunct); } Builder<Expression> otherJoinConjuncts = ImmutableList.builderWithExpectedSize( join.getOtherJoinConjuncts().size()); for (Expression otherJoinConjunct : join.getOtherJoinConjuncts()) { otherJoinConjunct = analyzer.analyze(otherJoinConjunct); otherJoinConjunct = TypeCoercionUtils.castIfNotSameType(otherJoinConjunct, BooleanType.INSTANCE); otherJoinConjuncts.add(otherJoinConjunct); } return new LogicalJoin<>(join.getJoinType(), hashJoinConjuncts.build(), otherJoinConjuncts.build(), join.getDistributeHint(), join.getMarkJoinSlotReference(), join.children(), null); } private LogicalJoin<Plan, Plan> bindUsingJoin(MatchingContext<UsingJoin<Plan, Plan>> ctx) { UsingJoin<Plan, Plan> using = ctx.root; CascadesContext cascadesContext = ctx.cascadesContext; List<Expression> unboundHashJoinConjunct = using.getHashJoinConjuncts(); List<Slot> leftOutput = Utils.reverseImmutableList(using.left().getOutput()); Scope leftScope = toScope(cascadesContext, ExpressionUtils.distinctSlotByName(leftOutput)); Scope rightScope = toScope(cascadesContext, ExpressionUtils.distinctSlotByName(using.right().getOutput())); ExpressionRewriteContext rewriteContext = new ExpressionRewriteContext(cascadesContext); Builder<Expression> hashEqExprs = ImmutableList.builderWithExpectedSize(unboundHashJoinConjunct.size()); for (Expression usingColumn : unboundHashJoinConjunct) { ExpressionAnalyzer leftExprAnalyzer = new ExpressionAnalyzer( using, leftScope, cascadesContext, true, false); Expression usingLeftSlot = leftExprAnalyzer.analyze(usingColumn, rewriteContext); ExpressionAnalyzer rightExprAnalyzer = new ExpressionAnalyzer( using, rightScope, cascadesContext, true, false); Expression usingRightSlot = rightExprAnalyzer.analyze(usingColumn, rewriteContext); hashEqExprs.add(new EqualTo(usingLeftSlot, usingRightSlot)); } return new LogicalJoin<>( using.getJoinType() == JoinType.CROSS_JOIN ? 
JoinType.INNER_JOIN : using.getJoinType(), hashEqExprs.build(), using.getOtherJoinConjuncts(), using.getDistributeHint(), using.getMarkJoinSlotReference(), using.children(), null); } private Plan bindProject(MatchingContext<LogicalProject<Plan>> ctx) { LogicalProject<Plan> project = ctx.root; CascadesContext cascadesContext = ctx.cascadesContext; SimpleExprAnalyzer analyzer = buildSimpleExprAnalyzer( project, cascadesContext, project.children(), true, true); List<NamedExpression> excepts = project.getExcepts(); Supplier<Set<NamedExpression>> boundExcepts = Suppliers.memoize( () -> analyzer.analyzeToSet(project.getExcepts())); Builder<NamedExpression> boundProjections = ImmutableList.builderWithExpectedSize(project.arity()); StatementContext statementContext = ctx.statementContext; for (Expression expression : project.getProjects()) { Expression expr = analyzer.analyze(expression); if (!(expr instanceof BoundStar)) { boundProjections.add((NamedExpression) expr); } else { BoundStar boundStar = (BoundStar) expr; List<Slot> slots = boundStar.getSlots(); if (!excepts.isEmpty()) { slots = Utils.filterImmutableList(slots, slot -> !boundExcepts.get().contains(slot)); } boundProjections.addAll(slots); List<Slot> slotsForLambda = slots; UnboundStar unboundStar = (UnboundStar) expression; unboundStar.getIndexInSqlString().ifPresent(pair -> { statementContext.addIndexInSqlToString(pair, toSqlWithBackquote(slotsForLambda)); }); } } return project.withProjects(boundProjections.build()); } private Plan bindFilter(MatchingContext<LogicalFilter<Plan>> ctx) { LogicalFilter<Plan> filter = ctx.root; CascadesContext cascadesContext = ctx.cascadesContext; SimpleExprAnalyzer analyzer = buildSimpleExprAnalyzer( filter, cascadesContext, filter.children(), true, true); ImmutableSet.Builder<Expression> boundConjuncts = ImmutableSet.builderWithExpectedSize( filter.getConjuncts().size()); for (Expression conjunct : filter.getConjuncts()) { Expression boundConjunct = analyzer.analyze(conjunct); boundConjunct = TypeCoercionUtils.castIfNotSameType(boundConjunct, BooleanType.INSTANCE); boundConjuncts.add(boundConjunct); } return new LogicalFilter<>(boundConjuncts.build(), filter.child()); } private Plan bindRepeat(MatchingContext<LogicalRepeat<Plan>> ctx) { LogicalRepeat<Plan> repeat = ctx.root; CascadesContext cascadesContext = ctx.cascadesContext; SimpleExprAnalyzer repeatOutputAnalyzer = buildSimpleExprAnalyzer( repeat, cascadesContext, repeat.children(), true, true); List<NamedExpression> boundRepeatOutput = repeatOutputAnalyzer.analyzeToList(repeat.getOutputExpressions()); Supplier<Scope> aggOutputScopeWithoutAggFun = buildAggOutputScopeWithoutAggFun(boundRepeatOutput, cascadesContext); Builder<List<Expression>> boundGroupingSetsBuilder = ImmutableList.builderWithExpectedSize(repeat.getGroupingSets().size()); for (List<Expression> groupingSet : repeat.getGroupingSets()) { List<Expression> boundGroupingSet = bindGroupBy( repeat, groupingSet, boundRepeatOutput, aggOutputScopeWithoutAggFun, cascadesContext); boundGroupingSetsBuilder.add(boundGroupingSet); } List<List<Expression>> boundGroupingSets = boundGroupingSetsBuilder.build(); List<NamedExpression> nullableOutput = PlanUtils.adjustNullableForRepeat(boundGroupingSets, boundRepeatOutput); for (List<Expression> groupingSet : boundGroupingSets) { checkIfOutputAliasNameDuplicatedForGroupBy(groupingSet, nullableOutput); } Set<Slot> groupingExprs = boundGroupingSets.stream() .flatMap(Collection::stream).map(expr -> expr.getInputSlots()) 
.flatMap(Collection::stream).collect(Collectors.toSet()); Set<GroupingScalarFunction> groupingScalarFunctions = ExpressionUtils .collect(nullableOutput, GroupingScalarFunction.class::isInstance); for (GroupingScalarFunction function : groupingScalarFunctions) { if (!groupingExprs.containsAll(function.getInputSlots())) { throw new AnalysisException("Column in " + function.getName() + " does not exist in GROUP BY clause."); } } return repeat.withGroupSetsAndOutput(boundGroupingSets, nullableOutput); } private List<Expression> bindGroupBy( Aggregate<Plan> agg, List<Expression> groupBy, List<NamedExpression> boundAggOutput, Supplier<Scope> aggOutputScopeWithoutAggFun, CascadesContext cascadesContext) { Scope childOutputScope = toScope(cascadesContext, agg.child().getOutput()); SimpleExprAnalyzer analyzer = buildCustomSlotBinderAnalyzer( agg, cascadesContext, childOutputScope, true, true, (self, unboundSlot) -> { List<Slot> slotsInChildren = self.bindExactSlotsByThisScope(unboundSlot, childOutputScope); if (slotsInChildren.size() == 1) { return slotsInChildren; } List<Slot> slotsInOutput = self.bindExactSlotsByThisScope( unboundSlot, aggOutputScopeWithoutAggFun.get()); if (slotsInOutput.isEmpty()) { return slotsInChildren; } Builder<Expression> useOutputExpr = ImmutableList.builderWithExpectedSize(slotsInOutput.size()); for (Slot slotInOutput : slotsInOutput) { MappingSlot mappingSlot = (MappingSlot) slotInOutput; useOutputExpr.add(mappingSlot.getMappingExpression()); } return useOutputExpr.build(); }); ImmutableList.Builder<Expression> boundGroupByBuilder = ImmutableList.builderWithExpectedSize(groupBy.size()); for (Expression key : groupBy) { boundGroupByBuilder.add(bindWithOrdinal(key, analyzer, boundAggOutput)); } List<Expression> boundGroupBy = boundGroupByBuilder.build(); checkIfOutputAliasNameDuplicatedForGroupBy(boundGroupBy, boundAggOutput); return boundGroupBy; } private Supplier<Scope> buildAggOutputScopeWithoutAggFun( List<? extends NamedExpression> boundAggOutput, CascadesContext cascadesContext) { return Suppliers.memoize(() -> { Builder<MappingSlot> nonAggFunOutput = ImmutableList.builderWithExpectedSize(boundAggOutput.size()); for (NamedExpression output : boundAggOutput) { if (!output.containsType(AggregateFunction.class)) { Slot outputSlot = output.toSlot(); MappingSlot mappingSlot = new MappingSlot(outputSlot, output instanceof Alias ? 
output.child(0) : output); nonAggFunOutput.add(mappingSlot); } } return toScope(cascadesContext, nonAggFunOutput.build()); }); } private Plan bindSortWithoutSetOperation(MatchingContext<LogicalSort<Plan>> ctx) { CascadesContext cascadesContext = ctx.cascadesContext; LogicalSort<Plan> sort = ctx.root; Plan input = sort.child(); List<Slot> childOutput = input.getOutput(); if (input instanceof LogicalProject && ((LogicalProject<?>) input).isDistinct() && (input.child(0) instanceof LogicalHaving || input.child(0) instanceof LogicalAggregate || input.child(0) instanceof LogicalRepeat)) { input = input.child(0); } if (input instanceof LogicalHaving) { input = input.child(0); } List<Slot> inputSlots = input.getOutput(); Scope inputScope = toScope(cascadesContext, inputSlots); final Plan finalInput = input; Supplier<Scope> inputChildrenScope = Suppliers.memoize( () -> toScope(cascadesContext, PlanUtils.fastGetChildrenOutputs(finalInput.children()))); SimpleExprAnalyzer bindInInputScopeThenInputChildScope = buildCustomSlotBinderAnalyzer( sort, cascadesContext, inputScope, true, false, (self, unboundSlot) -> { List<Slot> slotsInInput = self.bindExactSlotsByThisScope(unboundSlot, inputScope); if (!slotsInInput.isEmpty()) { return ImmutableList.of(slotsInInput.get(0)); } return self.bindExactSlotsByThisScope(unboundSlot, inputChildrenScope.get()); }); SimpleExprAnalyzer bindInInputChildScope = getAnalyzerForOrderByAggFunc(finalInput, cascadesContext, sort, inputChildrenScope, inputScope); Builder<OrderKey> boundOrderKeys = ImmutableList.builderWithExpectedSize(sort.getOrderKeys().size()); FunctionRegistry functionRegistry = cascadesContext.getConnectContext().getEnv().getFunctionRegistry(); for (OrderKey orderKey : sort.getOrderKeys()) { Expression boundKey; if (hasAggregateFunction(orderKey.getExpr(), functionRegistry)) { boundKey = bindInInputChildScope.analyze(orderKey.getExpr()); } else { boundKey = bindWithOrdinal(orderKey.getExpr(), bindInInputScopeThenInputChildScope, childOutput); } boundOrderKeys.add(orderKey.withExpression(boundKey)); } return new LogicalSort<>(boundOrderKeys.build(), sort.child()); } private LogicalTVFRelation bindTableValuedFunction(MatchingContext<UnboundTVFRelation> ctx) { UnboundTVFRelation unboundTVFRelation = ctx.root; StatementContext statementContext = ctx.statementContext; Env env = statementContext.getConnectContext().getEnv(); FunctionRegistry functionRegistry = env.getFunctionRegistry(); String functionName = unboundTVFRelation.getFunctionName(); Properties arguments = unboundTVFRelation.getProperties(); FunctionBuilder functionBuilder = functionRegistry.findFunctionBuilder(functionName, arguments); Pair<? extends Expression, ? 
extends BoundFunction> bindResult = functionBuilder.build(functionName, arguments); if (!(bindResult.first instanceof TableValuedFunction)) { throw new AnalysisException(bindResult.first.toSql() + " is not a TableValuedFunction"); } Optional<SqlCacheContext> sqlCacheContext = statementContext.getSqlCacheContext(); if (sqlCacheContext.isPresent()) { sqlCacheContext.get().setCannotProcessExpression(true); } return new LogicalTVFRelation(unboundTVFRelation.getRelationId(), (TableValuedFunction) bindResult.first); } private void checkSameNameSlot(List<Slot> childOutputs, String subQueryAlias) { Set<String> nameSlots = new HashSet<>(childOutputs.size() * 2); for (Slot s : childOutputs) { if (!nameSlots.add(s.getInternalName())) { throw new AnalysisException("Duplicated inline view column alias: '" + s.getName() + "'" + " in inline view: '" + subQueryAlias + "'"); } } } private void checkIfOutputAliasNameDuplicatedForGroupBy(Collection<Expression> expressions, List<? extends NamedExpression> output) { if (output.stream().noneMatch(Alias.class::isInstance)) { return; } List<Alias> aliasList = ExpressionUtils.filter(output, Alias.class); List<NamedExpression> exprAliasList = ExpressionUtils.collectAll(expressions, NamedExpression.class::isInstance); boolean isGroupByContainAlias = false; for (NamedExpression ne : exprAliasList) { for (Alias alias : aliasList) { if (!alias.getExprId().equals(ne.getExprId()) && alias.getName().equalsIgnoreCase(ne.getName())) { isGroupByContainAlias = true; } } if (isGroupByContainAlias) { break; } } if (isGroupByContainAlias && ConnectContext.get() != null && ConnectContext.get().getSessionVariable().isGroupByAndHavingUseAliasFirst()) { throw new AnalysisException("group_by_and_having_use_alias=true is unsupported for Nereids"); } } private boolean isAggregateFunction(UnboundFunction unboundFunction, FunctionRegistry functionRegistry) { return functionRegistry.isAggregateFunction( unboundFunction.getDbName(), unboundFunction.getName()); } private Expression bindWithOrdinal( Expression unbound, SimpleExprAnalyzer analyzer, List<? extends Expression> boundSelectOutput) { if (unbound instanceof IntegerLikeLiteral) { int ordinal = ((IntegerLikeLiteral) unbound).getIntValue(); if (ordinal >= 1 && ordinal <= boundSelectOutput.size()) { Expression boundSelectItem = boundSelectOutput.get(ordinal - 1); return boundSelectItem instanceof Alias ? boundSelectItem.child(0) : boundSelectItem; } else { return unbound; } } else { return analyzer.analyze(unbound); } } private <E extends Expression> E checkBoundExceptLambda(E expression, Plan plan) { if (expression instanceof Lambda) { return expression; } if (expression instanceof UnboundSlot) { UnboundSlot unboundSlot = (UnboundSlot) expression; String tableName = StringUtils.join(unboundSlot.getQualifier(), "."); if (tableName.isEmpty()) { tableName = "table list"; } throw new AnalysisException("Unknown column '" + unboundSlot.getNameParts().get(unboundSlot.getNameParts().size() - 1) + "' in '" + tableName + "' in " + plan.getType().toString().substring("LOGICAL_".length()) + " clause"); } expression.children().forEach(e -> checkBoundExceptLambda(e, plan)); return expression; } private Scope toScope(CascadesContext cascadesContext, List<? 
extends Slot> slots) { Optional<Scope> outerScope = cascadesContext.getOuterScope(); if (outerScope.isPresent()) { return new Scope(outerScope, slots, outerScope.get().getSubquery()); } else { return new Scope(slots); } } private SimpleExprAnalyzer buildSimpleExprAnalyzer( Plan currentPlan, CascadesContext cascadesContext, List<Plan> children, boolean enableExactMatch, boolean bindSlotInOuterScope) { List<Slot> childrenOutputs = PlanUtils.fastGetChildrenOutputs(children); Scope scope = toScope(cascadesContext, childrenOutputs); return buildSimpleExprAnalyzer(currentPlan, cascadesContext, scope, enableExactMatch, bindSlotInOuterScope); } private SimpleExprAnalyzer buildSimpleExprAnalyzer( Plan currentPlan, CascadesContext cascadesContext, Scope scope, boolean enableExactMatch, boolean bindSlotInOuterScope) { ExpressionRewriteContext rewriteContext = new ExpressionRewriteContext(cascadesContext); ExpressionAnalyzer expressionAnalyzer = new ExpressionAnalyzer(currentPlan, scope, cascadesContext, enableExactMatch, bindSlotInOuterScope); return expr -> expressionAnalyzer.analyze(expr, rewriteContext); } private SimpleExprAnalyzer buildCustomSlotBinderAnalyzer( Plan currentPlan, CascadesContext cascadesContext, Scope defaultScope, boolean enableExactMatch, boolean bindSlotInOuterScope, CustomSlotBinderAnalyzer customSlotBinder) { ExpressionRewriteContext rewriteContext = new ExpressionRewriteContext(cascadesContext); ExpressionAnalyzer expressionAnalyzer = new ExpressionAnalyzer(currentPlan, defaultScope, cascadesContext, enableExactMatch, bindSlotInOuterScope) { @Override protected List<? extends Expression> bindSlotByThisScope(UnboundSlot unboundSlot) { return customSlotBinder.bindSlot(this, unboundSlot); } }; return expr -> expressionAnalyzer.analyze(expr, rewriteContext); } private interface SimpleExprAnalyzer { Expression analyze(Expression expr); default <E extends Expression> List<E> analyzeToList(List<E> exprs) { ImmutableList.Builder<E> result = ImmutableList.builderWithExpectedSize(exprs.size()); for (E expr : exprs) { result.add((E) analyze(expr)); } return result.build(); } default <E extends Expression> Set<E> analyzeToSet(List<E> exprs) { ImmutableSet.Builder<E> result = ImmutableSet.builderWithExpectedSize(exprs.size() * 2); for (E expr : exprs) { result.add((E) analyze(expr)); } return result.build(); } } private interface CustomSlotBinderAnalyzer { List<? 
extends Expression> bindSlot(ExpressionAnalyzer analyzer, UnboundSlot unboundSlot); } public String toSqlWithBackquote(List<Slot> slots) { return slots.stream().map(slot -> ((SlotReference) slot).getQualifiedNameWithBackquote()) .collect(Collectors.joining(", ")); } private boolean hasAggregateFunction(Expression expression, FunctionRegistry functionRegistry) { return expression.anyMatch(expr -> { if (expr instanceof AggregateFunction) { return true; } else if (expr instanceof UnboundFunction) { UnboundFunction unboundFunction = (UnboundFunction) expr; boolean isAggregateFunction = functionRegistry .isAggregateFunction( unboundFunction.getDbName(), unboundFunction.getName() ); return isAggregateFunction; } return false; }); } private SimpleExprAnalyzer getAnalyzerForOrderByAggFunc(Plan finalInput, CascadesContext cascadesContext, LogicalSort<Plan> sort, Supplier<Scope> inputChildrenScope, Scope inputScope) { ImmutableList.Builder<Slot> outputSlots = ImmutableList.builder(); if (finalInput instanceof LogicalAggregate) { LogicalAggregate<Plan> aggregate = (LogicalAggregate<Plan>) finalInput; List<NamedExpression> outputExpressions = aggregate.getOutputExpressions(); for (NamedExpression outputExpr : outputExpressions) { if (!outputExpr.anyMatch(expr -> expr instanceof AggregateFunction)) { outputSlots.add(outputExpr.toSlot()); } } } Scope outputWithoutAggFunc = toScope(cascadesContext, outputSlots.build()); SimpleExprAnalyzer bindInInputChildScope = buildCustomSlotBinderAnalyzer( sort, cascadesContext, inputScope, true, false, (analyzer, unboundSlot) -> { if (finalInput instanceof LogicalAggregate) { List<Slot> boundInOutputWithoutAggFunc = analyzer.bindSlotByScope(unboundSlot, outputWithoutAggFunc); if (!boundInOutputWithoutAggFunc.isEmpty()) { return ImmutableList.of(boundInOutputWithoutAggFunc.get(0)); } } return analyzer.bindExactSlotsByThisScope(unboundSlot, inputChildrenScope.get()); }); return bindInInputChildScope; } }
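The binder code that closes this record resolves positional references in ORDER BY / GROUP BY via `bindWithOrdinal`: an integer literal is read as a 1-based index into the bound select list, and an alias is unwrapped to its child expression. A standalone sketch of just that rule, using hypothetical `Expr`/`IntLiteral`/`Alias` stand-ins rather than Doris's actual classes:

```java
import java.util.List;

// Hypothetical stand-ins for the planner's expression hierarchy.
interface Expr {}
record IntLiteral(int value) implements Expr {}
record Alias(Expr child, String name) implements Expr {}

final class OrdinalBinder {
    // Mirrors bindWithOrdinal above: "ORDER BY 2" means the second select item,
    // unwrapped if it is an alias; out-of-range ordinals pass through unchanged
    // (the real code hands non-literal expressions to the expression analyzer).
    static Expr bindWithOrdinal(Expr unbound, List<Expr> boundSelectOutput) {
        if (unbound instanceof IntLiteral lit) {
            int ordinal = lit.value();
            if (ordinal >= 1 && ordinal <= boundSelectOutput.size()) {
                Expr item = boundSelectOutput.get(ordinal - 1);
                return item instanceof Alias alias ? alias.child() : item;
            }
        }
        return unbound;
    }
}
```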
According to the AssertJ JavaDoc, `as()` must be called before the assertion method (a description set afterwards is never applied), so it should come before `isFalse()`:
```suggestion
assertThat(reader.isClosed)
        .as("The reader should have not been closed.")
        .isFalse();
```
public void testIdleShutdownSplitFetcherWaitsUntilRecordProcessed() throws Exception { final String splitId = "testSplit"; final AwaitingReader<Integer, TestingSourceSplit> reader = new AwaitingReader<>( new IOException("Should not happen"), new RecordsBySplits<>( Collections.emptyMap(), Collections.singleton(splitId))); final SplitFetcherManager<Integer, TestingSourceSplit> fetcherManager = createFetcher(splitId, reader, new Configuration()); try { FutureCompletingBlockingQueue<RecordsWithSplitIds<Integer>> queue = fetcherManager.getQueue(); queue.getAvailabilityFuture().get(); waitUntil( () -> { fetcherManager.maybeShutdownFinishedFetchers(); return fetcherManager.getNumAliveFetchers() == 0; }, Duration.ofSeconds(1), "The fetcher should have already been removed from the alive fetchers."); waitUntil( () -> queue.size() == 2, Duration.ofSeconds(1), "The element queue should have 2 batches when the fetcher is closed."); queue.poll().recycle(); assertThat(reader.isClosed) .isFalse() .as("The reader should have not been closed."); queue.poll().recycle(); waitUntil( () -> reader.isClosed, Duration.ofSeconds(1), "The reader should have been closed."); } finally { fetcherManager.close(30_000); } }
.as("The reader should have not been closed.");
public void testIdleShutdownSplitFetcherWaitsUntilRecordProcessed() throws Exception { final String splitId = "testSplit"; final AwaitingReader<Integer, TestingSourceSplit> reader = new AwaitingReader<>( new IOException("Should not happen"), new RecordsBySplits<>( Collections.emptyMap(), Collections.singleton(splitId))); final SplitFetcherManager<Integer, TestingSourceSplit> fetcherManager = createFetcher(splitId, reader, new Configuration()); try { FutureCompletingBlockingQueue<RecordsWithSplitIds<Integer>> queue = fetcherManager.getQueue(); queue.getAvailabilityFuture().get(); waitUntil( () -> { fetcherManager.maybeShutdownFinishedFetchers(); return fetcherManager.getNumAliveFetchers() == 0; }, Duration.ofSeconds(1), "The fetcher should have already been removed from the alive fetchers."); waitUntil( () -> queue.size() == 2, Duration.ofSeconds(1), "The element queue should have 2 batches when the fetcher is closed."); queue.poll().recycle(); assertThat(reader.isClosed).as("The reader should have not been closed.").isFalse(); queue.poll().recycle(); waitUntil( () -> reader.isClosed, Duration.ofSeconds(1), "The reader should have been closed."); } finally { fetcherManager.close(30_000); } }
class SplitFetcherManagerTest { @Test public void testExceptionPropagationFirstFetch() throws Exception { testExceptionPropagation(); } @Test public void testExceptionPropagationSuccessiveFetch() throws Exception { testExceptionPropagation( new TestingRecordsWithSplitIds<>("testSplit", 1, 2, 3, 4), new TestingRecordsWithSplitIds<>("testSplit", 5, 6, 7, 8)); } @Test public void testCloseFetcherWithException() throws Exception { TestingSplitReader<Object, TestingSourceSplit> reader = new TestingSplitReader<>(); reader.setCloseWithException(); SplitFetcherManager<Object, TestingSourceSplit> fetcherManager = createFetcher("test-split", reader, new Configuration()); fetcherManager.close(1000L); assertThatThrownBy(fetcherManager::checkErrors) .hasRootCauseMessage("Artificial exception on closing the split reader."); } @Test public void testCloseCleansUpPreviouslyClosedFetcher() throws Exception { final String splitId = "testSplit"; Configuration config = new Configuration(); config.set(SourceReaderOptions.ELEMENT_QUEUE_CAPACITY, 1); final AwaitingReader<Integer, TestingSourceSplit> reader = new AwaitingReader<>( new IOException("Should not happen"), new RecordsBySplits<>( Collections.emptyMap(), Collections.singleton(splitId))); final SplitFetcherManager<Integer, TestingSourceSplit> fetcherManager = createFetcher(splitId, reader, config); fetcherManager.getQueue().getAvailabilityFuture().get(); waitUntil( () -> { fetcherManager.maybeShutdownFinishedFetchers(); return fetcherManager.fetchers.isEmpty(); }, "The idle fetcher should have been removed."); fetcherManager.close(30_000); } @Test @SuppressWarnings("FinalPrivateMethod") @SafeVarargs private final void testExceptionPropagation( final RecordsWithSplitIds<Integer>... fetchesBeforeError) throws Exception { final IOException testingException = new IOException("test"); final AwaitingReader<Integer, TestingSourceSplit> reader = new AwaitingReader<>(testingException, fetchesBeforeError); final Configuration configuration = new Configuration(); configuration.set(SourceReaderOptions.ELEMENT_QUEUE_CAPACITY, 10); final SplitFetcherManager<Integer, TestingSourceSplit> fetcher = createFetcher("testSplit", reader, configuration); reader.awaitAllRecordsReturned(); drainQueue(fetcher.getQueue()); assertThat(fetcher.getQueue().getAvailabilityFuture().isDone()).isFalse(); reader.triggerThrowException(); fetcher.getQueue().getAvailabilityFuture().get(); try { fetcher.checkErrors(); fail("expected exception"); } catch (Exception e) { assertThat(e.getCause().getCause()).isSameAs(testingException); } finally { fetcher.close(20_000L); } } private static <E> SplitFetcherManager<E, TestingSourceSplit> createFetcher( final String splitId, final SplitReader<E, TestingSourceSplit> reader, final Configuration configuration) { final SingleThreadFetcherManager<E, TestingSourceSplit> fetcher = new SingleThreadFetcherManager<>(() -> reader, configuration); fetcher.addSplits(Collections.singletonList(new TestingSourceSplit(splitId))); return fetcher; } private static void drainQueue(FutureCompletingBlockingQueue<?> queue) { while (queue.poll() != null) {} } private static final class AwaitingReader<E, SplitT extends SourceSplit> implements SplitReader<E, SplitT> { private final Queue<RecordsWithSplitIds<E>> fetches; private final IOException testError; private final OneShotLatch inBlocking = new OneShotLatch(); private final OneShotLatch throwError = new OneShotLatch(); private volatile boolean isClosed = false; @SafeVarargs AwaitingReader(IOException testError, 
RecordsWithSplitIds<E>... fetches) { this.testError = testError; this.fetches = new ArrayDeque<>(Arrays.asList(fetches)); } @Override public RecordsWithSplitIds<E> fetch() throws IOException { if (!fetches.isEmpty()) { return fetches.poll(); } else { inBlocking.trigger(); try { throwError.await(); } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new IOException("interrupted"); } throw testError; } } @Override public void handleSplitsChanges(SplitsChange<SplitT> splitsChanges) {} @Override public void wakeUp() {} @Override public void close() throws Exception { isClosed = true; } public void awaitAllRecordsReturned() throws InterruptedException { inBlocking.await(); } public void triggerThrowException() { throwError.trigger(); } } }
class SplitFetcherManagerTest { @Test public void testExceptionPropagationFirstFetch() throws Exception { testExceptionPropagation(); } @Test public void testExceptionPropagationSuccessiveFetch() throws Exception { testExceptionPropagation( new TestingRecordsWithSplitIds<>("testSplit", 1, 2, 3, 4), new TestingRecordsWithSplitIds<>("testSplit", 5, 6, 7, 8)); } @Test public void testCloseFetcherWithException() throws Exception { TestingSplitReader<Object, TestingSourceSplit> reader = new TestingSplitReader<>(); reader.setCloseWithException(); SplitFetcherManager<Object, TestingSourceSplit> fetcherManager = createFetcher("test-split", reader, new Configuration()); fetcherManager.close(1000L); assertThatThrownBy(fetcherManager::checkErrors) .hasRootCauseMessage("Artificial exception on closing the split reader."); } @Test(timeout = 30000) public void testCloseCleansUpPreviouslyClosedFetcher() throws Exception { final String splitId = "testSplit"; Configuration config = new Configuration(); config.set(SourceReaderOptions.ELEMENT_QUEUE_CAPACITY, 1); final AwaitingReader<Integer, TestingSourceSplit> reader = new AwaitingReader<>( new IOException("Should not happen"), new RecordsBySplits<>( Collections.emptyMap(), Collections.singleton(splitId))); final SplitFetcherManager<Integer, TestingSourceSplit> fetcherManager = createFetcher(splitId, reader, config); fetcherManager.getQueue().getAvailabilityFuture().get(); waitUntil( () -> { fetcherManager.maybeShutdownFinishedFetchers(); return fetcherManager.fetchers.isEmpty(); }, "The idle fetcher should have been removed."); fetcherManager.close(60_000); } @Test @SuppressWarnings("FinalPrivateMethod") @SafeVarargs private final void testExceptionPropagation( final RecordsWithSplitIds<Integer>... fetchesBeforeError) throws Exception { final IOException testingException = new IOException("test"); final AwaitingReader<Integer, TestingSourceSplit> reader = new AwaitingReader<>(testingException, fetchesBeforeError); final Configuration configuration = new Configuration(); configuration.set(SourceReaderOptions.ELEMENT_QUEUE_CAPACITY, 10); final SplitFetcherManager<Integer, TestingSourceSplit> fetcher = createFetcher("testSplit", reader, configuration); reader.awaitAllRecordsReturned(); drainQueue(fetcher.getQueue()); assertThat(fetcher.getQueue().getAvailabilityFuture().isDone()).isFalse(); reader.triggerThrowException(); fetcher.getQueue().getAvailabilityFuture().get(); try { fetcher.checkErrors(); fail("expected exception"); } catch (Exception e) { assertThat(e.getCause().getCause()).isSameAs(testingException); } finally { fetcher.close(20_000L); } } private static <E> SplitFetcherManager<E, TestingSourceSplit> createFetcher( final String splitId, final SplitReader<E, TestingSourceSplit> reader, final Configuration configuration) { final SingleThreadFetcherManager<E, TestingSourceSplit> fetcher = new SingleThreadFetcherManager<>(() -> reader, configuration); fetcher.addSplits(Collections.singletonList(new TestingSourceSplit(splitId))); return fetcher; } private static void drainQueue(FutureCompletingBlockingQueue<?> queue) { while (queue.poll() != null) {} } private static final class AwaitingReader<E, SplitT extends SourceSplit> implements SplitReader<E, SplitT> { private final Queue<RecordsWithSplitIds<E>> fetches; private final IOException testError; private final OneShotLatch inBlocking = new OneShotLatch(); private final OneShotLatch throwError = new OneShotLatch(); private volatile boolean isClosed = false; @SafeVarargs AwaitingReader(IOException 
testError, RecordsWithSplitIds<E>... fetches) { this.testError = testError; this.fetches = new ArrayDeque<>(Arrays.asList(fetches)); } @Override public RecordsWithSplitIds<E> fetch() throws IOException { if (!fetches.isEmpty()) { return fetches.poll(); } else { inBlocking.trigger(); try { throwError.await(); } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new IOException("interrupted"); } throw testError; } } @Override public void handleSplitsChanges(SplitsChange<SplitT> splitsChanges) {} @Override public void wakeUp() {} @Override public void close() throws Exception { isClosed = true; } public void awaitAllRecordsReturned() throws InterruptedException { inBlocking.await(); } public void triggerThrowException() { throwError.trigger(); } } }
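For reference, the reason behind the reordering suggested in this record: AssertJ only applies a description that is registered before the assertion method runs; if the assertion is chained first, it throws (or succeeds) before `as()` is ever reached. A minimal sketch in plain AssertJ, separate from the Flink test above:

```java
import static org.assertj.core.api.Assertions.assertThat;

public class AsOrderingExample {
    public static void main(String[] args) {
        boolean closed = false;

        // Correct: the description is set before the assertion executes,
        // so a failure message would include it.
        assertThat(closed)
                .as("The reader should have not been closed.")
                .isFalse();

        // Ineffective: isFalse() runs (and would throw) before as() is reached,
        // so this description can never be attached to a failure.
        assertThat(closed)
                .isFalse()
                .as("This description is set too late to matter.");
    }
}
```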
Why is the continuation range needed? Isn't the information in the FeedRange enough (getFeedRange)?
private Mono<RxDocumentServiceRequest> populateHeaders(RxDocumentServiceRequest request, RequestVerb httpMethod) { request.getHeaders().put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123()); if (this.masterKeyOrResourceToken != null || this.resourceTokensMap != null || this.cosmosAuthorizationTokenResolver != null || this.credential != null) { String resourceName = request.getResourceAddress(); String authorization = this.getUserAuthorizationToken( resourceName, request.getResourceType(), httpMethod, request.getHeaders(), AuthorizationTokenType.PrimaryMasterKey, request.properties); try { authorization = URLEncoder.encode(authorization, "UTF-8"); } catch (UnsupportedEncodingException e) { throw new IllegalStateException("Failed to encode authtoken.", e); } request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization); } if ((RequestVerb.POST.equals(httpMethod) || RequestVerb.PUT.equals(httpMethod)) && !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) { request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON); } if (RequestVerb.PATCH.equals(httpMethod) && !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) { request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON_PATCH); } if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.ACCEPT)) { request.getHeaders().put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON); } MetadataDiagnosticsContext metadataDiagnosticsCtx = BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics); if (this.requiresFeedRangeFiltering(request)) { final Range<String> continuationRange = request.getContinuationRange(); if (continuationRange != null && !continuationRange.equals(PartitionKeyInternalHelper.FullRange)) { Mono<Range<String>> getEffectiveRangeTask = request.getFeedRange().getEffectiveRange( this.getPartitionKeyRangeCache(), request, this.collectionCache.resolveCollectionAsync(metadataDiagnosticsCtx, request)); return getEffectiveRangeTask .flatMap(feedRangeRange -> { if (!Range.checkOverlapping( continuationRange, feedRangeRange)) { return Mono.error(new NotFoundException( String.format("Incompatible continuation token range '%s - %s' and feed range '%s - %s'.", continuationRange.getMin(), continuationRange.getMax(), feedRangeRange.getMin(), feedRangeRange.getMax()))); } Range.MinComparator<String> minComparator = new Range.MinComparator<>(); Range.MaxComparator<String> maxComparator = new Range.MaxComparator<>(); boolean isMaxInclusive; boolean isMinInclusive; String effectiveMax; String effectiveMin; if (minComparator.compare(continuationRange, feedRangeRange) > 0) { effectiveMin = continuationRange.getMin(); isMinInclusive = continuationRange.isMinInclusive(); } else { effectiveMin = feedRangeRange.getMin(); isMinInclusive = feedRangeRange.isMinInclusive(); } if (maxComparator.compare(continuationRange, feedRangeRange) <= 0) { effectiveMax = continuationRange.getMax(); isMaxInclusive = continuationRange.isMaxInclusive(); } else { effectiveMax = feedRangeRange.getMax(); isMaxInclusive = feedRangeRange.isMaxInclusive(); } final Range<String> effectiveRange = new Range<>(effectiveMin, effectiveMax, isMinInclusive, isMaxInclusive); final FeedRangeInternal effectiveFeedRange = effectiveRange.equals(feedRangeRange) ? 
request.getFeedRange() : new FeedRangeEpkImpl(effectiveRange); return effectiveFeedRange .populateFeedRangeFilteringHeaders( this.getPartitionKeyRangeCache(), request, this.collectionCache.resolveCollectionAsync(metadataDiagnosticsCtx, request)) .flatMap(this::populateAuthorizationHeader); }); } return request.getFeedRange() .populateFeedRangeFilteringHeaders( this.getPartitionKeyRangeCache(), request, this.collectionCache.resolveCollectionAsync(metadataDiagnosticsCtx, request)) .flatMap(this::populateAuthorizationHeader); } return this.populateAuthorizationHeader(request); }
final Range<String> continuationRange = request.getContinuationRange();
private Mono<RxDocumentServiceRequest> populateHeaders(RxDocumentServiceRequest request, RequestVerb httpMethod) { request.getHeaders().put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123()); if (this.masterKeyOrResourceToken != null || this.resourceTokensMap != null || this.cosmosAuthorizationTokenResolver != null || this.credential != null) { String resourceName = request.getResourceAddress(); String authorization = this.getUserAuthorizationToken( resourceName, request.getResourceType(), httpMethod, request.getHeaders(), AuthorizationTokenType.PrimaryMasterKey, request.properties); try { authorization = URLEncoder.encode(authorization, "UTF-8"); } catch (UnsupportedEncodingException e) { throw new IllegalStateException("Failed to encode authtoken.", e); } request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization); } if ((RequestVerb.POST.equals(httpMethod) || RequestVerb.PUT.equals(httpMethod)) && !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) { request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON); } if (RequestVerb.PATCH.equals(httpMethod) && !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) { request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON_PATCH); } if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.ACCEPT)) { request.getHeaders().put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON); } MetadataDiagnosticsContext metadataDiagnosticsCtx = BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics); if (this.requiresFeedRangeFiltering(request)) { return request.getFeedRange() .populateFeedRangeFilteringHeaders( this.getPartitionKeyRangeCache(), request, this.collectionCache.resolveCollectionAsync(metadataDiagnosticsCtx, request)) .flatMap(this::populateAuthorizationHeader); } return this.populateAuthorizationHeader(request); }
class RxDocumentClientImpl implements AsyncDocumentClient, IAuthorizationTokenProvider, CpuMemoryListener, DiagnosticsClientContext { private static final AtomicInteger activeClientsCnt = new AtomicInteger(0); private static final AtomicInteger clientIdGenerator = new AtomicInteger(0); private static final Range<String> RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES = new Range<>( PartitionKeyInternalHelper.MinimumInclusiveEffectivePartitionKey, PartitionKeyInternalHelper.MaximumExclusiveEffectivePartitionKey, true, false); private static final String DUMMY_SQL_QUERY = "this is dummy and only used in creating " + "ParallelDocumentQueryExecutioncontext, but not used"; private final static ObjectMapper mapper = Utils.getSimpleObjectMapper(); private final ItemDeserializer itemDeserializer = new ItemDeserializer.JsonDeserializer(); private final Logger logger = LoggerFactory.getLogger(RxDocumentClientImpl.class); private final String masterKeyOrResourceToken; private final URI serviceEndpoint; private final ConnectionPolicy connectionPolicy; private final ConsistencyLevel consistencyLevel; private final BaseAuthorizationTokenProvider authorizationTokenProvider; private final UserAgentContainer userAgentContainer; private final boolean hasAuthKeyResourceToken; private final Configs configs; private final boolean connectionSharingAcrossClientsEnabled; private AzureKeyCredential credential; private final TokenCredential tokenCredential; private String[] tokenCredentialScopes; private SimpleTokenCache tokenCredentialCache; private CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver; AuthorizationTokenType authorizationTokenType; private SessionContainer sessionContainer; private String firstResourceTokenFromPermissionFeed = StringUtils.EMPTY; private RxClientCollectionCache collectionCache; private RxStoreModel gatewayProxy; private RxStoreModel storeModel; private GlobalAddressResolver addressResolver; private RxPartitionKeyRangeCache partitionKeyRangeCache; private Map<String, List<PartitionKeyAndResourceTokenPair>> resourceTokensMap; private final boolean contentResponseOnWriteEnabled; private final AtomicBoolean closed = new AtomicBoolean(false); private final int clientId; private ClientTelemetry clientTelemetry; private IRetryPolicyFactory resetSessionTokenRetryPolicy; /** * Compatibility mode: Allows to specify compatibility mode used by client when * making query requests. Should be removed when application/sql is no longer * supported. 
*/ private final QueryCompatibilityMode queryCompatibilityMode = QueryCompatibilityMode.Default; private final HttpClient reactorHttpClient; private final GlobalEndpointManager globalEndpointManager; private final RetryPolicy retryPolicy; private volatile boolean useMultipleWriteLocations; private StoreClientFactory storeClientFactory; private GatewayServiceConfigurationReader gatewayConfigurationReader; private final DiagnosticsClientConfig diagnosticsClientConfig; public RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, List<Permission> permissionFeed, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver, AzureKeyCredential credential, boolean sessionCapturingOverride, boolean connectionSharingAcrossClientsEnabled, boolean contentResponseOnWriteEnabled) { this(serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, consistencyLevel, configs, credential, null, sessionCapturingOverride, connectionSharingAcrossClientsEnabled, contentResponseOnWriteEnabled); this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver; } public RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, List<Permission> permissionFeed, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver, AzureKeyCredential credential, TokenCredential tokenCredential, boolean sessionCapturingOverride, boolean connectionSharingAcrossClientsEnabled, boolean contentResponseOnWriteEnabled) { this(serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, consistencyLevel, configs, credential, tokenCredential, sessionCapturingOverride, connectionSharingAcrossClientsEnabled, contentResponseOnWriteEnabled); this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver; } private RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, List<Permission> permissionFeed, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, AzureKeyCredential credential, TokenCredential tokenCredential, boolean sessionCapturingOverrideEnabled, boolean connectionSharingAcrossClientsEnabled, boolean contentResponseOnWriteEnabled) { this(serviceEndpoint, masterKeyOrResourceToken, connectionPolicy, consistencyLevel, configs, credential, tokenCredential, sessionCapturingOverrideEnabled, connectionSharingAcrossClientsEnabled, contentResponseOnWriteEnabled); if (permissionFeed != null && permissionFeed.size() > 0) { this.resourceTokensMap = new HashMap<>(); for (Permission permission : permissionFeed) { String[] segments = StringUtils.split(permission.getResourceLink(), Constants.Properties.PATH_SEPARATOR.charAt(0)); if (segments.length <= 0) { throw new IllegalArgumentException("resourceLink"); } List<PartitionKeyAndResourceTokenPair> partitionKeyAndResourceTokenPairs = null; PathInfo pathInfo = new PathInfo(false, StringUtils.EMPTY, StringUtils.EMPTY, false); if (!PathsHelper.tryParsePathSegments(permission.getResourceLink(), pathInfo, null)) { throw new IllegalArgumentException(permission.getResourceLink()); } partitionKeyAndResourceTokenPairs = resourceTokensMap.get(pathInfo.resourceIdOrFullName); if (partitionKeyAndResourceTokenPairs == null) { partitionKeyAndResourceTokenPairs = new ArrayList<>(); this.resourceTokensMap.put(pathInfo.resourceIdOrFullName, partitionKeyAndResourceTokenPairs); } PartitionKey 
partitionKey = permission.getResourcePartitionKey(); partitionKeyAndResourceTokenPairs.add(new PartitionKeyAndResourceTokenPair( partitionKey != null ? BridgeInternal.getPartitionKeyInternal(partitionKey) : PartitionKeyInternal.Empty, permission.getToken())); logger.debug("Initializing resource token map , with map key [{}] , partition key [{}] and resource token [{}]", pathInfo.resourceIdOrFullName, partitionKey != null ? partitionKey.toString() : null, permission.getToken()); } if(this.resourceTokensMap.isEmpty()) { throw new IllegalArgumentException("permissionFeed"); } String firstToken = permissionFeed.get(0).getToken(); if(ResourceTokenAuthorizationHelper.isResourceToken(firstToken)) { this.firstResourceTokenFromPermissionFeed = firstToken; } } } RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, AzureKeyCredential credential, TokenCredential tokenCredential, boolean sessionCapturingOverrideEnabled, boolean connectionSharingAcrossClientsEnabled, boolean contentResponseOnWriteEnabled) { activeClientsCnt.incrementAndGet(); this.clientId = clientIdGenerator.getAndDecrement(); this.diagnosticsClientConfig = new DiagnosticsClientConfig(); this.diagnosticsClientConfig.withClientId(this.clientId); this.diagnosticsClientConfig.withActiveClientCounter(activeClientsCnt); this.diagnosticsClientConfig.withConnectionSharingAcrossClientsEnabled(connectionSharingAcrossClientsEnabled); this.diagnosticsClientConfig.withConsistency(consistencyLevel); logger.info( "Initializing DocumentClient [{}] with" + " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}], directModeProtocol [{}]", this.clientId, serviceEndpoint, connectionPolicy, consistencyLevel, configs.getProtocol()); try { this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled; this.configs = configs; this.masterKeyOrResourceToken = masterKeyOrResourceToken; this.serviceEndpoint = serviceEndpoint; this.credential = credential; this.tokenCredential = tokenCredential; this.contentResponseOnWriteEnabled = contentResponseOnWriteEnabled; this.authorizationTokenType = AuthorizationTokenType.Invalid; if (this.credential != null) { hasAuthKeyResourceToken = false; this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential); } else if (masterKeyOrResourceToken != null && ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) { this.authorizationTokenProvider = null; hasAuthKeyResourceToken = true; this.authorizationTokenType = AuthorizationTokenType.ResourceToken; } else if(masterKeyOrResourceToken != null && !ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) { this.credential = new AzureKeyCredential(this.masterKeyOrResourceToken); hasAuthKeyResourceToken = false; this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey; this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential); } else { hasAuthKeyResourceToken = false; this.authorizationTokenProvider = null; if (tokenCredential != null) { this.tokenCredentialScopes = new String[] { serviceEndpoint.getScheme() + ": }; this.tokenCredentialCache = new SimpleTokenCache(() -> this.tokenCredential .getToken(new TokenRequestContext().addScopes(this.tokenCredentialScopes))); this.authorizationTokenType = AuthorizationTokenType.AadToken; } } if (connectionPolicy != null) { this.connectionPolicy = connectionPolicy; } else { this.connectionPolicy = 
new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig()); } this.diagnosticsClientConfig.withMultipleWriteRegionsEnabled(this.connectionPolicy.isMultipleWriteRegionsEnabled()); this.diagnosticsClientConfig.withEndpointDiscoveryEnabled(this.connectionPolicy.isEndpointDiscoveryEnabled()); this.diagnosticsClientConfig.withPreferredRegions(this.connectionPolicy.getPreferredRegions()); boolean disableSessionCapturing = (ConsistencyLevel.SESSION != consistencyLevel && !sessionCapturingOverrideEnabled); this.sessionContainer = new SessionContainer(this.serviceEndpoint.getHost(), disableSessionCapturing); this.consistencyLevel = consistencyLevel; this.userAgentContainer = new UserAgentContainer(); String userAgentSuffix = this.connectionPolicy.getUserAgentSuffix(); if (userAgentSuffix != null && userAgentSuffix.length() > 0) { userAgentContainer.setSuffix(userAgentSuffix); } this.reactorHttpClient = httpClient(); this.globalEndpointManager = new GlobalEndpointManager(asDatabaseAccountManagerInternal(), this.connectionPolicy, /**/configs); this.retryPolicy = new RetryPolicy(this, this.globalEndpointManager, this.connectionPolicy); this.resetSessionTokenRetryPolicy = retryPolicy; CpuMemoryMonitor.register(this); } catch (RuntimeException e) { logger.error("unexpected failure in initializing client.", e); close(); throw e; } } @Override public DiagnosticsClientConfig getConfig() { return diagnosticsClientConfig; } @Override public CosmosDiagnostics createDiagnostics() { return BridgeInternal.createCosmosDiagnostics(this); } private void initializeGatewayConfigurationReader() { this.gatewayConfigurationReader = new GatewayServiceConfigurationReader(this.globalEndpointManager); DatabaseAccount databaseAccount = this.globalEndpointManager.getLatestDatabaseAccount(); if (databaseAccount == null) { logger.error("Client initialization failed." + " Check if the endpoint is reachable and if your auth token is valid"); throw new RuntimeException("Client initialization failed." 
+ " Check if the endpoint is reachable and if your auth token is valid"); } this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount); } public void init() { try { this.gatewayProxy = createRxGatewayProxy(this.sessionContainer, this.consistencyLevel, this.queryCompatibilityMode, this.userAgentContainer, this.globalEndpointManager, this.reactorHttpClient); this.globalEndpointManager.init(); this.initializeGatewayConfigurationReader(); this.collectionCache = new RxClientCollectionCache(this, this.sessionContainer, this.gatewayProxy, this, this.retryPolicy); this.resetSessionTokenRetryPolicy = new ResetSessionTokenRetryPolicyFactory(this.sessionContainer, this.collectionCache, this.retryPolicy); this.partitionKeyRangeCache = new RxPartitionKeyRangeCache(RxDocumentClientImpl.this, collectionCache); if (this.connectionPolicy.getConnectionMode() == ConnectionMode.GATEWAY) { this.storeModel = this.gatewayProxy; } else { this.initializeDirectConnectivity(); } clientTelemetry = new ClientTelemetry(null, UUID.randomUUID().toString(), ManagementFactory.getRuntimeMXBean().getName(), userAgentContainer.getUserAgent(), connectionPolicy.getConnectionMode(), globalEndpointManager.getLatestDatabaseAccount().getId(), null, null, httpClient(), connectionPolicy.isClientTelemetryEnabled()); clientTelemetry.init(); } catch (Exception e) { logger.error("unexpected failure in initializing client.", e); close(); throw e; } } private void initializeDirectConnectivity() { this.addressResolver = new GlobalAddressResolver(this, this.reactorHttpClient, this.globalEndpointManager, this.configs.getProtocol(), this, this.collectionCache, this.partitionKeyRangeCache, userAgentContainer, null, this.connectionPolicy); this.storeClientFactory = new StoreClientFactory( this.addressResolver, this.diagnosticsClientConfig, this.configs, this.connectionPolicy, this.userAgentContainer, this.connectionSharingAcrossClientsEnabled ); this.createStoreModel(true); } DatabaseAccountManagerInternal asDatabaseAccountManagerInternal() { return new DatabaseAccountManagerInternal() { @Override public URI getServiceEndpoint() { return RxDocumentClientImpl.this.getServiceEndpoint(); } @Override public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) { logger.info("Getting database account endpoint from {}", endpoint); return RxDocumentClientImpl.this.getDatabaseAccountFromEndpoint(endpoint); } @Override public ConnectionPolicy getConnectionPolicy() { return RxDocumentClientImpl.this.getConnectionPolicy(); } }; } RxGatewayStoreModel createRxGatewayProxy(ISessionContainer sessionContainer, ConsistencyLevel consistencyLevel, QueryCompatibilityMode queryCompatibilityMode, UserAgentContainer userAgentContainer, GlobalEndpointManager globalEndpointManager, HttpClient httpClient) { return new RxGatewayStoreModel( this, sessionContainer, consistencyLevel, queryCompatibilityMode, userAgentContainer, globalEndpointManager, httpClient); } private HttpClient httpClient() { HttpClientConfig httpClientConfig = new HttpClientConfig(this.configs) .withMaxIdleConnectionTimeout(this.connectionPolicy.getIdleHttpConnectionTimeout()) .withPoolSize(this.connectionPolicy.getMaxConnectionPoolSize()) .withProxy(this.connectionPolicy.getProxy()) .withRequestTimeout(this.connectionPolicy.getRequestTimeout()); if (connectionSharingAcrossClientsEnabled) { return SharedGatewayHttpClient.getOrCreateInstance(httpClientConfig, diagnosticsClientConfig); } else { 
diagnosticsClientConfig.withGatewayHttpClientConfig(httpClientConfig); return HttpClient.createFixed(httpClientConfig); } } private void createStoreModel(boolean subscribeRntbdStatus) { StoreClient storeClient = this.storeClientFactory.createStoreClient(this, this.addressResolver, this.sessionContainer, this.gatewayConfigurationReader, this, false ); this.storeModel = new ServerStoreModel(storeClient); } @Override public URI getServiceEndpoint() { return this.serviceEndpoint; } @Override public URI getWriteEndpoint() { return globalEndpointManager.getWriteEndpoints().stream().findFirst().orElse(null); } @Override public URI getReadEndpoint() { return globalEndpointManager.getReadEndpoints().stream().findFirst().orElse(null); } @Override public ConnectionPolicy getConnectionPolicy() { return this.connectionPolicy; } @Override public boolean isContentResponseOnWriteEnabled() { return contentResponseOnWriteEnabled; } @Override public ConsistencyLevel getConsistencyLevel() { return consistencyLevel; } @Override public ClientTelemetry getClientTelemetry() { return this.clientTelemetry; } @Override public Mono<ResourceResponse<Database>> createDatabase(Database database, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createDatabaseInternal(database, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Database>> createDatabaseInternal(Database database, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (database == null) { throw new IllegalArgumentException("Database"); } logger.debug("Creating a Database. id: [{}]", database.getId()); validateResource(database); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Create); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(database); Instant serializationEndTimeUTC = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTimeUTC, SerializationDiagnosticsContext.SerializationType.DATABASE_SERIALIZATION); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Create, ResourceType.Database, Paths.DATABASES_ROOT, byteBuffer, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class)); } catch (Exception e) { logger.debug("Failure in creating a database. 
due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Database>> deleteDatabase(String databaseLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Database>> deleteDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } logger.debug("Deleting a Database. databaseLink: [{}]", databaseLink); String path = Utils.joinPath(databaseLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.Database, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class)); } catch (Exception e) { logger.debug("Failure in deleting a database. due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Database>> readDatabase(String databaseLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Database>> readDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } logger.debug("Reading a Database. databaseLink: [{}]", databaseLink); String path = Utils.joinPath(databaseLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Database, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class)); } catch (Exception e) { logger.debug("Failure in reading a database. 
due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<Database>> readDatabases(CosmosQueryRequestOptions options) { return readFeed(options, ResourceType.Database, Database.class, Paths.DATABASES_ROOT); } private String parentResourceLinkToQueryLink(String parentResourceLink, ResourceType resourceTypeEnum) { switch (resourceTypeEnum) { case Database: return Paths.DATABASES_ROOT; case DocumentCollection: return Utils.joinPath(parentResourceLink, Paths.COLLECTIONS_PATH_SEGMENT); case Document: return Utils.joinPath(parentResourceLink, Paths.DOCUMENTS_PATH_SEGMENT); case Offer: return Paths.OFFERS_ROOT; case User: return Utils.joinPath(parentResourceLink, Paths.USERS_PATH_SEGMENT); case Permission: return Utils.joinPath(parentResourceLink, Paths.PERMISSIONS_PATH_SEGMENT); case Attachment: return Utils.joinPath(parentResourceLink, Paths.ATTACHMENTS_PATH_SEGMENT); case StoredProcedure: return Utils.joinPath(parentResourceLink, Paths.STORED_PROCEDURES_PATH_SEGMENT); case Trigger: return Utils.joinPath(parentResourceLink, Paths.TRIGGERS_PATH_SEGMENT); case UserDefinedFunction: return Utils.joinPath(parentResourceLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT); case Conflict: return Utils.joinPath(parentResourceLink, Paths.CONFLICTS_PATH_SEGMENT); default: throw new IllegalArgumentException("resource type not supported"); } } private <T extends Resource> Flux<FeedResponse<T>> createQuery( String parentResourceLink, SqlQuerySpec sqlQuery, CosmosQueryRequestOptions options, Class<T> klass, ResourceType resourceTypeEnum) { String resourceLink = parentResourceLinkToQueryLink(parentResourceLink, resourceTypeEnum); UUID activityId = Utils.randomUUID(); IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this); InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy( this.collectionCache, null, resourceLink, options); return ObservableHelper.fluxInlineIfPossibleAsObs( () -> createQueryInternal(resourceLink, sqlQuery, options, klass, resourceTypeEnum, queryClient, activityId), invalidPartitionExceptionRetryPolicy); } private <T extends Resource> Flux<FeedResponse<T>> createQueryInternal( String resourceLink, SqlQuerySpec sqlQuery, CosmosQueryRequestOptions options, Class<T> klass, ResourceType resourceTypeEnum, IDocumentQueryClient queryClient, UUID activityId) { Flux<? 
extends IDocumentQueryExecutionContext<T>> executionContext = DocumentQueryExecutionContextFactory.createDocumentQueryExecutionContextAsync(this, queryClient, resourceTypeEnum, klass, sqlQuery , options, resourceLink, false, activityId); AtomicBoolean isFirstResponse = new AtomicBoolean(true); return executionContext.flatMap(iDocumentQueryExecutionContext -> { QueryInfo queryInfo = null; if (iDocumentQueryExecutionContext instanceof PipelinedDocumentQueryExecutionContext) { queryInfo = ((PipelinedDocumentQueryExecutionContext<T>) iDocumentQueryExecutionContext).getQueryInfo(); } QueryInfo finalQueryInfo = queryInfo; return iDocumentQueryExecutionContext.executeAsync() .map(tFeedResponse -> { if (finalQueryInfo != null) { if (finalQueryInfo.hasSelectValue()) { ModelBridgeInternal .addQueryInfoToFeedResponse(tFeedResponse, finalQueryInfo); } if (isFirstResponse.compareAndSet(true, false)) { ModelBridgeInternal.addQueryPlanDiagnosticsContextToFeedResponse(tFeedResponse, finalQueryInfo.getQueryPlanDiagnosticsContext()); } } return tFeedResponse; }); }); } @Override public Flux<FeedResponse<Database>> queryDatabases(String query, CosmosQueryRequestOptions options) { return queryDatabases(new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<Database>> queryDatabases(SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(Paths.DATABASES_ROOT, querySpec, options, Database.class, ResourceType.Database); } @Override public Mono<ResourceResponse<DocumentCollection>> createCollection(String databaseLink, DocumentCollection collection, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> this.createCollectionInternal(databaseLink, collection, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<DocumentCollection>> createCollectionInternal(String databaseLink, DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } if (collection == null) { throw new IllegalArgumentException("collection"); } logger.debug("Creating a Collection. 
databaseLink: [{}], Collection id: [{}]", databaseLink, collection.getId()); validateResource(collection); String path = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Create); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection); Instant serializationEndTimeUTC = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTimeUTC, SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Create, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class)) .doOnNext(resourceResponse -> { this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(), getAltLink(resourceResponse.getResource()), resourceResponse.getResponseHeaders()); }); } catch (Exception e) { logger.debug("Failure in creating a collection. due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<DocumentCollection>> replaceCollection(DocumentCollection collection, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceCollectionInternal(collection, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<DocumentCollection>> replaceCollectionInternal(DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (collection == null) { throw new IllegalArgumentException("collection"); } logger.debug("Replacing a Collection. 
id: [{}]", collection.getId()); validateResource(collection); String path = Utils.joinPath(collection.getSelfLink(), null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Replace); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection); Instant serializationEndTimeUTC = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTimeUTC, SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class)) .doOnNext(resourceResponse -> { if (resourceResponse.getResource() != null) { this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(), getAltLink(resourceResponse.getResource()), resourceResponse.getResponseHeaders()); } }); } catch (Exception e) { logger.debug("Failure in replacing a collection. due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<DocumentCollection>> deleteCollection(String collectionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<DocumentCollection>> deleteCollectionInternal(String collectionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } logger.debug("Deleting a Collection. 
collectionLink: [{}]", collectionLink); String path = Utils.joinPath(collectionLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.DocumentCollection, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class)); } catch (Exception e) { logger.debug("Failure in deleting a collection, due to [{}]", e.getMessage(), e); return Mono.error(e); } } private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) { return populateHeaders(request, RequestVerb.DELETE) .flatMap(requestPopulated -> { if (requestPopulated.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) { documentClientRetryPolicy.updateEndTime(); requestPopulated.requestContext.updateRetryContext(documentClientRetryPolicy, true); } return getStoreProxy(requestPopulated).processMessage(requestPopulated); }); } private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) { return populateHeaders(request, RequestVerb.GET) .flatMap(requestPopulated -> { if (requestPopulated.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) { documentClientRetryPolicy.updateEndTime(); requestPopulated.requestContext.updateRetryContext(documentClientRetryPolicy, true); } return getStoreProxy(requestPopulated).processMessage(requestPopulated); }); } Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) { return populateHeaders(request, RequestVerb.GET) .flatMap(gatewayProxy::processMessage); } private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) { return populateHeaders(request, RequestVerb.POST) .flatMap(requestPopulated -> this.getStoreProxy(requestPopulated).processMessage(requestPopulated) .map(response -> { this.captureSessionToken(requestPopulated, response); return response; } )); } @Override public Mono<ResourceResponse<DocumentCollection>> readCollection(String collectionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<DocumentCollection>> readCollectionInternal(String collectionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } logger.debug("Reading a Collection. 
collectionLink: [{}]", collectionLink); String path = Utils.joinPath(collectionLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.DocumentCollection, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class)); } catch (Exception e) { logger.debug("Failure in reading a collection, due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<DocumentCollection>> readCollections(String databaseLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } return readFeed(options, ResourceType.DocumentCollection, DocumentCollection.class, Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, String query, CosmosQueryRequestOptions options) { return createQuery(databaseLink, new SqlQuerySpec(query), options, DocumentCollection.class, ResourceType.DocumentCollection); } @Override public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(databaseLink, querySpec, options, DocumentCollection.class, ResourceType.DocumentCollection); } private static String serializeProcedureParams(List<Object> objectArray) { String[] stringArray = new String[objectArray.size()]; for (int i = 0; i < objectArray.size(); ++i) { Object object = objectArray.get(i); if (object instanceof JsonSerializable) { stringArray[i] = ModelBridgeInternal.toJsonFromJsonSerializable((JsonSerializable) object); } else { try { stringArray[i] = mapper.writeValueAsString(object); } catch (IOException e) { throw new IllegalArgumentException("Can't serialize the object into the json string", e); } } } return String.format("[%s]", StringUtils.join(stringArray, ",")); } private static void validateResource(Resource resource) { if (!StringUtils.isEmpty(resource.getId())) { if (resource.getId().indexOf('/') != -1 || resource.getId().indexOf('\\') != -1 || resource.getId().indexOf('?') != -1 || resource.getId().indexOf(' throw new IllegalArgumentException("Id contains illegal chars."); } if (resource.getId().endsWith(" ")) { throw new IllegalArgumentException("Id ends with a space."); } } } private Map<String, String> getRequestHeaders(RequestOptions options, ResourceType resourceType, OperationType operationType) { Map<String, String> headers = new HashMap<>(); if (this.useMultipleWriteLocations) { headers.put(HttpConstants.HttpHeaders.ALLOW_TENTATIVE_WRITES, Boolean.TRUE.toString()); } if (consistencyLevel != null) { headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, consistencyLevel.toString()); } if (options == null) { if (!this.contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) { headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL); } return headers; } Map<String, String> customOptions = options.getHeaders(); if (customOptions != null) { headers.putAll(customOptions); } boolean contentResponseOnWriteEnabled = this.contentResponseOnWriteEnabled; if 
(options.isContentResponseOnWriteEnabled() != null) { contentResponseOnWriteEnabled = options.isContentResponseOnWriteEnabled(); } if (!contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) { headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL); } if (options.getIfMatchETag() != null) { headers.put(HttpConstants.HttpHeaders.IF_MATCH, options.getIfMatchETag()); } if(options.getIfNoneMatchETag() != null) { headers.put(HttpConstants.HttpHeaders.IF_NONE_MATCH, options.getIfNoneMatchETag()); } if (options.getConsistencyLevel() != null) { headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, options.getConsistencyLevel().toString()); } if (options.getIndexingDirective() != null) { headers.put(HttpConstants.HttpHeaders.INDEXING_DIRECTIVE, options.getIndexingDirective().toString()); } if (options.getPostTriggerInclude() != null && options.getPostTriggerInclude().size() > 0) { String postTriggerInclude = StringUtils.join(options.getPostTriggerInclude(), ","); headers.put(HttpConstants.HttpHeaders.POST_TRIGGER_INCLUDE, postTriggerInclude); } if (options.getPreTriggerInclude() != null && options.getPreTriggerInclude().size() > 0) { String preTriggerInclude = StringUtils.join(options.getPreTriggerInclude(), ","); headers.put(HttpConstants.HttpHeaders.PRE_TRIGGER_INCLUDE, preTriggerInclude); } if (!Strings.isNullOrEmpty(options.getSessionToken())) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, options.getSessionToken()); } if (options.getResourceTokenExpirySeconds() != null) { headers.put(HttpConstants.HttpHeaders.RESOURCE_TOKEN_EXPIRY, String.valueOf(options.getResourceTokenExpirySeconds())); } if (options.getOfferThroughput() != null && options.getOfferThroughput() >= 0) { headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, options.getOfferThroughput().toString()); } else if (options.getOfferType() != null) { headers.put(HttpConstants.HttpHeaders.OFFER_TYPE, options.getOfferType()); } if (options.getOfferThroughput() == null) { if (options.getThroughputProperties() != null) { Offer offer = ModelBridgeInternal.getOfferFromThroughputProperties(options.getThroughputProperties()); final OfferAutoscaleSettings offerAutoscaleSettings = offer.getOfferAutoScaleSettings(); OfferAutoscaleAutoUpgradeProperties autoscaleAutoUpgradeProperties = null; if (offerAutoscaleSettings != null) { autoscaleAutoUpgradeProperties = offer.getOfferAutoScaleSettings().getAutoscaleAutoUpgradeProperties(); } if (offer.hasOfferThroughput() && (offerAutoscaleSettings != null && offerAutoscaleSettings.getMaxThroughput() >= 0 || autoscaleAutoUpgradeProperties != null && autoscaleAutoUpgradeProperties .getAutoscaleThroughputProperties() .getIncrementPercent() >= 0)) { throw new IllegalArgumentException("Autoscale provisioned throughput can not be configured with " + "fixed offer"); } if (offer.hasOfferThroughput()) { headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, String.valueOf(offer.getThroughput())); } else if (offer.getOfferAutoScaleSettings() != null) { headers.put(HttpConstants.HttpHeaders.OFFER_AUTOPILOT_SETTINGS, ModelBridgeInternal.toJsonFromJsonSerializable(offer.getOfferAutoScaleSettings())); } } } if (options.isQuotaInfoEnabled()) { headers.put(HttpConstants.HttpHeaders.POPULATE_QUOTA_INFO, String.valueOf(true)); } if (options.isScriptLoggingEnabled()) { headers.put(HttpConstants.HttpHeaders.SCRIPT_ENABLE_LOGGING, String.valueOf(true)); } return headers; } private Mono<RxDocumentServiceRequest> 
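// Partition key resolution precedence, as implemented below: an explicit PartitionKey.NONE in
// RequestOptions wins, then any other explicit options partition key, then an empty value when the
// collection has no partition key definition, and only as a last resort is the value extracted from
// the serialized document itself (that extraction is also timed for serialization diagnostics).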
addPartitionKeyInformation(RxDocumentServiceRequest request, ByteBuffer contentAsByteBuffer, Document document, RequestOptions options) { Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); return collectionObs .map(collectionValueHolder -> { addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionValueHolder.v); return request; }); } private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request, ByteBuffer contentAsByteBuffer, Object document, RequestOptions options, Mono<Utils.ValueHolder<DocumentCollection>> collectionObs) { return collectionObs.map(collectionValueHolder -> { addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionValueHolder.v); return request; }); } private void addPartitionKeyInformation(RxDocumentServiceRequest request, ByteBuffer contentAsByteBuffer, Object objectDoc, RequestOptions options, DocumentCollection collection) { PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey(); PartitionKeyInternal partitionKeyInternal = null; if (options != null && options.getPartitionKey() != null && options.getPartitionKey().equals(PartitionKey.NONE)){ partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition); } else if (options != null && options.getPartitionKey() != null) { partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(options.getPartitionKey()); } else if (partitionKeyDefinition == null || partitionKeyDefinition.getPaths().size() == 0) { partitionKeyInternal = PartitionKeyInternal.getEmpty(); } else if (contentAsByteBuffer != null || objectDoc != null) { InternalObjectNode internalObjectNode; if (objectDoc instanceof InternalObjectNode) { internalObjectNode = (InternalObjectNode) objectDoc; } else if (contentAsByteBuffer != null) { contentAsByteBuffer.rewind(); internalObjectNode = new InternalObjectNode(contentAsByteBuffer); } else { throw new IllegalStateException("ContentAsByteBuffer and objectDoc are null"); } Instant serializationStartTime = Instant.now(); partitionKeyInternal = extractPartitionKeyValueFromDocument(internalObjectNode, partitionKeyDefinition); Instant serializationEndTime = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTime, serializationEndTime, SerializationDiagnosticsContext.SerializationType.PARTITION_KEY_FETCH_SERIALIZATION ); SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } } else { throw new UnsupportedOperationException("PartitionKey value must be supplied for this operation."); } request.setPartitionKeyInternal(partitionKeyInternal); request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson())); } private static PartitionKeyInternal extractPartitionKeyValueFromDocument( InternalObjectNode document, PartitionKeyDefinition partitionKeyDefinition) { if (partitionKeyDefinition != null) { String path = partitionKeyDefinition.getPaths().iterator().next(); List<String> parts = PathParser.getPathParts(path); if 
(parts.size() >= 1) { Object value = ModelBridgeInternal.getObjectByPathFromJsonSerializable(document, parts); if (value == null || value.getClass() == ObjectNode.class) { value = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition); } if (value instanceof PartitionKeyInternal) { return (PartitionKeyInternal) value; } else { return PartitionKeyInternal.fromObjectArray(Collections.singletonList(value), false); } } } return null; } private Mono<RxDocumentServiceRequest> getCreateDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy, String documentCollectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration, OperationType operationType) { if (StringUtils.isEmpty(documentCollectionLink)) { throw new IllegalArgumentException("documentCollectionLink"); } if (document == null) { throw new IllegalArgumentException("document"); } Instant serializationStartTimeUTC = Instant.now(); ByteBuffer content = BridgeInternal.serializeJsonToByteBuffer(document, mapper); Instant serializationEndTimeUTC = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTimeUTC, SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION); String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, operationType); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, operationType, ResourceType.Document, path, requestHeaders, options, content); if (requestRetryPolicy != null) { requestRetryPolicy.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); return addPartitionKeyInformation(request, content, document, options, collectionObs); } private Mono<RxDocumentServiceRequest> getBatchDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy, String documentCollectionLink, ServerBatchRequest serverBatchRequest, RequestOptions options, boolean disableAutomaticIdGeneration) { checkArgument(StringUtils.isNotEmpty(documentCollectionLink), "expected non empty documentCollectionLink"); checkNotNull(serverBatchRequest, "expected non null serverBatchRequest"); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer content = ByteBuffer.wrap(Utils.getUTF8Bytes(serverBatchRequest.getRequestBody())); Instant serializationEndTimeUTC = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTimeUTC, SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION); String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Batch); RxDocumentServiceRequest request = RxDocumentServiceRequest.create( this, OperationType.Batch, ResourceType.Document, 
path, requestHeaders, options, content); if (requestRetryPolicy != null) { requestRetryPolicy.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); return collectionObs.map((Utils.ValueHolder<DocumentCollection> collectionValueHolder) -> { addBatchHeaders(request, serverBatchRequest, collectionValueHolder.v); return request; }); } private RxDocumentServiceRequest addBatchHeaders(RxDocumentServiceRequest request, ServerBatchRequest serverBatchRequest, DocumentCollection collection) { if(serverBatchRequest instanceof SinglePartitionKeyServerBatchRequest) { PartitionKey partitionKey = ((SinglePartitionKeyServerBatchRequest) serverBatchRequest).getPartitionKeyValue(); PartitionKeyInternal partitionKeyInternal; if (partitionKey.equals(PartitionKey.NONE)) { PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey(); partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition); } else { partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(partitionKey); } request.setPartitionKeyInternal(partitionKeyInternal); request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson())); } else if(serverBatchRequest instanceof PartitionKeyRangeServerBatchRequest) { request.setPartitionKeyRangeIdentity(new PartitionKeyRangeIdentity(((PartitionKeyRangeServerBatchRequest) serverBatchRequest).getPartitionKeyRangeId())); } else { throw new UnsupportedOperationException("Unknown Server request."); } request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_REQUEST, Boolean.TRUE.toString()); request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_ATOMIC, String.valueOf(serverBatchRequest.isAtomicBatch())); request.getHeaders().put(HttpConstants.HttpHeaders.SHOULD_BATCH_CONTINUE_ON_ERROR, String.valueOf(serverBatchRequest.isShouldContinueOnError())); return request; } private boolean requiresFeedRangeFiltering(RxDocumentServiceRequest request) { if (request.getResourceType() != ResourceType.Document) { return false; } switch (request.getOperationType()) { case ReadFeed: return request.getFeedRange() != null; default: return false; } } @Override public Mono<RxDocumentServiceRequest> populateAuthorizationHeader(RxDocumentServiceRequest request) { if (request == null) { throw new IllegalArgumentException("request"); } if (this.authorizationTokenType == AuthorizationTokenType.AadToken) { return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache) .map(authorization -> { request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization); return request; }); } else { return Mono.just(request); } } @Override public Mono<HttpHeaders> populateAuthorizationHeader(HttpHeaders httpHeaders) { if (httpHeaders == null) { throw new IllegalArgumentException("httpHeaders"); } if (this.authorizationTokenType == AuthorizationTokenType.AadToken) { return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache) .map(authorization -> { httpHeaders.set(HttpConstants.HttpHeaders.AUTHORIZATION, authorization); 
return httpHeaders; }); } return Mono.just(httpHeaders); } @Override public AuthorizationTokenType getAuthorizationTokenType() { return this.authorizationTokenType; } @Override public String getUserAuthorizationToken(String resourceName, ResourceType resourceType, RequestVerb requestVerb, Map<String, String> headers, AuthorizationTokenType tokenType, Map<String, Object> properties) { if (this.cosmosAuthorizationTokenResolver != null) { return this.cosmosAuthorizationTokenResolver.getAuthorizationToken(requestVerb, resourceName, this.resolveCosmosResourceType(resourceType), properties != null ? Collections.unmodifiableMap(properties) : null); } else if (credential != null) { return this.authorizationTokenProvider.generateKeyAuthorizationSignature(requestVerb, resourceName, resourceType, headers); } else if (masterKeyOrResourceToken != null && hasAuthKeyResourceToken && resourceTokensMap == null) { return masterKeyOrResourceToken; } else { assert resourceTokensMap != null; if(resourceType.equals(ResourceType.DatabaseAccount)) { return this.firstResourceTokenFromPermissionFeed; } return ResourceTokenAuthorizationHelper.getAuthorizationTokenUsingResourceTokens(resourceTokensMap, requestVerb, resourceName, headers); } } private CosmosResourceType resolveCosmosResourceType(ResourceType resourceType) { CosmosResourceType cosmosResourceType = ModelBridgeInternal.fromServiceSerializedFormat(resourceType.toString()); if (cosmosResourceType == null) { return CosmosResourceType.SYSTEM; } return cosmosResourceType; } void captureSessionToken(RxDocumentServiceRequest request, RxDocumentServiceResponse response) { this.sessionContainer.setSessionToken(request, response.getResponseHeaders()); } private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request, DocumentClientRetryPolicy retryPolicy) { return populateHeaders(request, RequestVerb.POST) .flatMap(requestPopulated -> { RxStoreModel storeProxy = this.getStoreProxy(requestPopulated); if (requestPopulated.requestContext != null && retryPolicy.getRetryCount() > 0) { retryPolicy.updateEndTime(); requestPopulated.requestContext.updateRetryContext(retryPolicy, true); } return storeProxy.processMessage(requestPopulated); }); } private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) { return populateHeaders(request, RequestVerb.POST) .flatMap(requestPopulated -> { Map<String, String> headers = requestPopulated.getHeaders(); assert (headers != null); headers.put(HttpConstants.HttpHeaders.IS_UPSERT, "true"); if (requestPopulated.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) { documentClientRetryPolicy.updateEndTime(); requestPopulated.requestContext.updateRetryContext(documentClientRetryPolicy, true); } return getStoreProxy(requestPopulated).processMessage(requestPopulated) .map(response -> { this.captureSessionToken(requestPopulated, response); return response; } ); }); } private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) { return populateHeaders(request, RequestVerb.PUT) .flatMap(requestPopulated -> { if (requestPopulated.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) { documentClientRetryPolicy.updateEndTime(); requestPopulated.requestContext.updateRetryContext(documentClientRetryPolicy, true); } return getStoreProxy(requestPopulated).processMessage(requestPopulated); }); } private Mono<RxDocumentServiceResponse> 
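// Like the other verb helpers above (create/upsert/replace/delete), patch must chain the Mono
// returned by populateHeaders so that authorization and session headers are applied before the
// request reaches the store proxy.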
patch(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) { return populateHeaders(request, RequestVerb.PATCH) .flatMap(requestPopulated -> { if (requestPopulated.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) { documentClientRetryPolicy.updateEndTime(); requestPopulated.requestContext.updateRetryContext(documentClientRetryPolicy, true); } return getStoreProxy(requestPopulated).processMessage(requestPopulated); }); } @Override public Mono<ResourceResponse<Document>> createDocument(String collectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); if (options == null || options.getPartitionKey() == null) { requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options); } DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy; return ObservableHelper.inlineIfPossibleAsObs(() -> createDocumentInternal(collectionLink, document, options, disableAutomaticIdGeneration, finalRetryPolicyInstance), requestRetryPolicy); } private Mono<ResourceResponse<Document>> createDocumentInternal(String collectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy requestRetryPolicy) { try { logger.debug("Creating a Document. collectionLink: [{}]", collectionLink); Mono<RxDocumentServiceRequest> requestObs = getCreateDocumentRequest(requestRetryPolicy, collectionLink, document, options, disableAutomaticIdGeneration, OperationType.Create); Mono<RxDocumentServiceResponse> responseObservable = requestObs.flatMap(request -> create(request, requestRetryPolicy)); return responseObservable .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)); } catch (Exception e) { logger.debug("Failure in creating a document due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Document>> upsertDocument(String collectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); if (options == null || options.getPartitionKey() == null) { requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options); } DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy; return ObservableHelper.inlineIfPossibleAsObs(() -> upsertDocumentInternal(collectionLink, document, options, disableAutomaticIdGeneration, finalRetryPolicyInstance), finalRetryPolicyInstance); } private Mono<ResourceResponse<Document>> upsertDocumentInternal(String collectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a Document. 
collectionLink: [{}]", collectionLink); Mono<RxDocumentServiceRequest> reqObs = getCreateDocumentRequest(retryPolicyInstance, collectionLink, document, options, disableAutomaticIdGeneration, OperationType.Upsert); Mono<RxDocumentServiceResponse> responseObservable = reqObs.flatMap(request -> upsert(request, retryPolicyInstance)); return responseObservable .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)); } catch (Exception e) { logger.debug("Failure in upserting a document due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Document>> replaceDocument(String documentLink, Object document, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); if (options == null || options.getPartitionKey() == null) { String collectionLink = Utils.getCollectionName(documentLink); requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options); } DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy; return ObservableHelper.inlineIfPossibleAsObs(() -> replaceDocumentInternal(documentLink, document, options, finalRequestRetryPolicy), requestRetryPolicy); } private Mono<ResourceResponse<Document>> replaceDocumentInternal(String documentLink, Object document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(documentLink)) { throw new IllegalArgumentException("documentLink"); } if (document == null) { throw new IllegalArgumentException("document"); } Document typedDocument = documentFromObject(document, mapper); return this.replaceDocumentInternal(documentLink, typedDocument, options, retryPolicyInstance); } catch (Exception e) { logger.debug("Failure in replacing a document due to [{}]", e.getMessage()); return Mono.error(e); } } @Override public Mono<ResourceResponse<Document>> replaceDocument(Document document, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); if (options == null || options.getPartitionKey() == null) { String collectionLink = document.getSelfLink(); requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options); } DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy; return ObservableHelper.inlineIfPossibleAsObs(() -> replaceDocumentInternal(document, options, finalRequestRetryPolicy), requestRetryPolicy); } private Mono<ResourceResponse<Document>> replaceDocumentInternal(Document document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (document == null) { throw new IllegalArgumentException("document"); } return this.replaceDocumentInternal(document.getSelfLink(), document, options, retryPolicyInstance); } catch (Exception e) { logger.debug("Failure in replacing a database due to [{}]", e.getMessage()); return Mono.error(e); } } private Mono<ResourceResponse<Document>> replaceDocumentInternal(String documentLink, Document document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { if (document == null) { throw new IllegalArgumentException("document"); } logger.debug("Replacing a Document. 
documentLink: [{}]", documentLink); final String path = Utils.joinPath(documentLink, null); final Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Document, OperationType.Replace); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer content = serializeJsonToByteBuffer(document); Instant serializationEndTime = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTime, SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION); final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.Document, path, requestHeaders, options, content); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, content, document, options, collectionObs); return requestObs.flatMap(req -> replace(request, retryPolicyInstance) .map(resp -> toResourceResponse(resp, Document.class))); } @Override public Mono<ResourceResponse<Document>> patchDocument(String documentLink, CosmosPatchOperations cosmosPatchOperations, RequestOptions options) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> patchDocumentInternal(documentLink, cosmosPatchOperations, options, documentClientRetryPolicy), documentClientRetryPolicy); } private Mono<ResourceResponse<Document>> patchDocumentInternal(String documentLink, CosmosPatchOperations cosmosPatchOperations, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { checkArgument(StringUtils.isNotEmpty(documentLink), "expected non empty documentLink"); checkNotNull(cosmosPatchOperations, "expected non null cosmosPatchOperations"); logger.debug("Running patch operations on Document. 
documentLink: [{}]", documentLink); final String path = Utils.joinPath(documentLink, null); final Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Document, OperationType.Patch); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer content = ByteBuffer.wrap(PatchUtil.serializeCosmosPatchToByteArray(cosmosPatchOperations)); Instant serializationEndTime = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTime, SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION); final RxDocumentServiceRequest request = RxDocumentServiceRequest.create( this, OperationType.Patch, ResourceType.Document, path, requestHeaders, options, content); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync( BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation( request, null, null, options, collectionObs); return requestObs.flatMap(req -> patch(request, retryPolicyInstance) .map(resp -> toResourceResponse(resp, Document.class))); } @Override public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDocumentInternal(documentLink, null, options, requestRetryPolicy), requestRetryPolicy); } @Override public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, InternalObjectNode internalObjectNode, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDocumentInternal(documentLink, internalObjectNode, options, requestRetryPolicy), requestRetryPolicy); } private Mono<ResourceResponse<Document>> deleteDocumentInternal(String documentLink, InternalObjectNode internalObjectNode, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(documentLink)) { throw new IllegalArgumentException("documentLink"); } logger.debug("Deleting a Document. 
documentLink: [{}]", documentLink); String path = Utils.joinPath(documentLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.Document, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, internalObjectNode, options, collectionObs); return requestObs.flatMap(req -> this .delete(req, retryPolicyInstance) .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class))); } catch (Exception e) { logger.debug("Failure in deleting a document due to [{}]", e.getMessage()); return Mono.error(e); } } @Override public Mono<ResourceResponse<Document>> readDocument(String documentLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readDocumentInternal(documentLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Document>> readDocumentInternal(String documentLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(documentLink)) { throw new IllegalArgumentException("documentLink"); } logger.debug("Reading a Document. documentLink: [{}]", documentLink); String path = Utils.joinPath(documentLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Document, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs); return requestObs.flatMap(req -> { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)); }); } catch (Exception e) { logger.debug("Failure in reading a document due to [{}]", e.getMessage()); return Mono.error(e); } } @Override public Flux<FeedResponse<Document>> readDocuments(String collectionLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return queryDocuments(collectionLink, "SELECT * FROM r", options); } @Override public <T> Mono<FeedResponse<T>> readMany( List<CosmosItemIdentity> itemIdentityList, String collectionLink, CosmosQueryRequestOptions options, Class<T> klass) { String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Query, ResourceType.Document, collectionLink, null ); Mono<Utils.ValueHolder<DocumentCollection>> 
collectionObs = collectionCache.resolveCollectionAsync(null, request); return collectionObs .flatMap(documentCollectionResourceResponse -> { final DocumentCollection collection = documentCollectionResourceResponse.v; if (collection == null) { throw new IllegalStateException("Collection cannot be null"); } final PartitionKeyDefinition pkDefinition = collection.getPartitionKey(); Mono<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = partitionKeyRangeCache .tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), collection.getResourceId(), null, null); return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> { Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap = new HashMap<>(); CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v; if (routingMap == null) { throw new IllegalStateException("Failed to get routing map."); } itemIdentityList .forEach(itemIdentity -> { String effectivePartitionKeyString = PartitionKeyInternalHelper .getEffectivePartitionKeyString( BridgeInternal.getPartitionKeyInternal( itemIdentity.getPartitionKey()), pkDefinition); PartitionKeyRange range = routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString); if (partitionRangeItemKeyMap.get(range) == null) { List<CosmosItemIdentity> list = new ArrayList<>(); list.add(itemIdentity); partitionRangeItemKeyMap.put(range, list); } else { List<CosmosItemIdentity> pairs = partitionRangeItemKeyMap.get(range); pairs.add(itemIdentity); partitionRangeItemKeyMap.put(range, pairs); } }); Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap; rangeQueryMap = getRangeQueryMap(partitionRangeItemKeyMap, collection.getPartitionKey()); return createReadManyQuery( resourceLink, new SqlQuerySpec(DUMMY_SQL_QUERY), options, Document.class, ResourceType.Document, collection, Collections.unmodifiableMap(rangeQueryMap)) .collectList() .map(feedList -> { List<T> finalList = new ArrayList<>(); HashMap<String, String> headers = new HashMap<>(); ConcurrentMap<String, QueryMetrics> aggregatedQueryMetrics = new ConcurrentHashMap<>(); double requestCharge = 0; for (FeedResponse<Document> page : feedList) { ConcurrentMap<String, QueryMetrics> pageQueryMetrics = ModelBridgeInternal.queryMetrics(page); if (pageQueryMetrics != null) { pageQueryMetrics.forEach( aggregatedQueryMetrics::putIfAbsent); } requestCharge += page.getRequestCharge(); finalList.addAll(page.getResults().stream().map(document -> ModelBridgeInternal.toObjectFromJsonSerializable(document, klass)).collect(Collectors.toList())); } headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE, Double .toString(requestCharge)); FeedResponse<T> frp = BridgeInternal .createFeedResponse(finalList, headers); return frp; }); }); } ); } private Map<PartitionKeyRange, SqlQuerySpec> getRangeQueryMap( Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap, PartitionKeyDefinition partitionKeyDefinition) { Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap = new HashMap<>(); String partitionKeySelector = createPkSelector(partitionKeyDefinition); for(Map.Entry<PartitionKeyRange, List<CosmosItemIdentity>> entry: partitionRangeItemKeyMap.entrySet()) { SqlQuerySpec sqlQuerySpec; if (partitionKeySelector.equals("[\"id\"]")) { sqlQuerySpec = createReadManyQuerySpecPartitionKeyIdSame(entry.getValue(), partitionKeySelector); } else { sqlQuerySpec = createReadManyQuerySpec(entry.getValue(), partitionKeySelector); } rangeQueryMap.put(entry.getKey(), sqlQuerySpec); } return rangeQueryMap; } 
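// Illustrative shapes of the generated readMany queries (parameter names and the "pk" path below
// are examples only). When the partition key path is "/id", the id-only IN form is used:
//   SELECT * FROM c WHERE c.id IN ( @param0, @param1 )
// otherwise each item contributes an (id, partition key) conjunct, e.g. for the first item:
//   SELECT * FROM c WHERE ( (c.id = @param1 AND  c["pk"] = @param0 ) OR ... )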
private SqlQuerySpec createReadManyQuerySpecPartitionKeyIdSame( List<CosmosItemIdentity> idPartitionKeyPairList, String partitionKeySelector) { StringBuilder queryStringBuilder = new StringBuilder(); List<SqlParameter> parameters = new ArrayList<>(); queryStringBuilder.append("SELECT * FROM c WHERE c.id IN ( "); for (int i = 0; i < idPartitionKeyPairList.size(); i++) { CosmosItemIdentity itemIdentity = idPartitionKeyPairList.get(i); String idValue = itemIdentity.getId(); String idParamName = "@param" + i; PartitionKey pkValueAsPartitionKey = itemIdentity.getPartitionKey(); Object pkValue = ModelBridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey); if (!Objects.equals(idValue, pkValue)) { continue; } parameters.add(new SqlParameter(idParamName, idValue)); queryStringBuilder.append(idParamName); if (i < idPartitionKeyPairList.size() - 1) { queryStringBuilder.append(", "); } } queryStringBuilder.append(" )"); return new SqlQuerySpec(queryStringBuilder.toString(), parameters); } private SqlQuerySpec createReadManyQuerySpec(List<CosmosItemIdentity> itemIdentities, String partitionKeySelector) { StringBuilder queryStringBuilder = new StringBuilder(); List<SqlParameter> parameters = new ArrayList<>(); queryStringBuilder.append("SELECT * FROM c WHERE ( "); for (int i = 0; i < itemIdentities.size(); i++) { CosmosItemIdentity itemIdentity = itemIdentities.get(i); PartitionKey pkValueAsPartitionKey = itemIdentity.getPartitionKey(); Object pkValue = ModelBridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey); String pkParamName = "@param" + (2 * i); parameters.add(new SqlParameter(pkParamName, pkValue)); String idValue = itemIdentity.getId(); String idParamName = "@param" + (2 * i + 1); parameters.add(new SqlParameter(idParamName, idValue)); queryStringBuilder.append("("); queryStringBuilder.append("c.id = "); queryStringBuilder.append(idParamName); queryStringBuilder.append(" AND "); queryStringBuilder.append(" c"); queryStringBuilder.append(partitionKeySelector); queryStringBuilder.append((" = ")); queryStringBuilder.append(pkParamName); queryStringBuilder.append(" )"); if (i < itemIdentities.size() - 1) { queryStringBuilder.append(" OR "); } } queryStringBuilder.append(" )"); return new SqlQuerySpec(queryStringBuilder.toString(), parameters); } private String createPkSelector(PartitionKeyDefinition partitionKeyDefinition) { return partitionKeyDefinition.getPaths() .stream() .map(pathPart -> StringUtils.substring(pathPart, 1)) .map(pathPart -> StringUtils.replace(pathPart, "\"", "\\")) .map(part -> "[\"" + part + "\"]") .collect(Collectors.joining()); } private <T extends Resource> Flux<FeedResponse<T>> createReadManyQuery( String parentResourceLink, SqlQuerySpec sqlQuery, CosmosQueryRequestOptions options, Class<T> klass, ResourceType resourceTypeEnum, DocumentCollection collection, Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap) { UUID activityId = Utils.randomUUID(); IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this); Flux<? 
extends IDocumentQueryExecutionContext<T>> executionContext = DocumentQueryExecutionContextFactory.createReadManyQueryAsync(this, queryClient, collection.getResourceId(), sqlQuery, rangeQueryMap, options, collection.getResourceId(), parentResourceLink, activityId, klass, resourceTypeEnum); return executionContext.flatMap(IDocumentQueryExecutionContext<T>::executeAsync); } @Override public Flux<FeedResponse<Document>> queryDocuments(String collectionLink, String query, CosmosQueryRequestOptions options) { return queryDocuments(collectionLink, new SqlQuerySpec(query), options); } private IDocumentQueryClient documentQueryClientImpl(RxDocumentClientImpl rxDocumentClientImpl) { return new IDocumentQueryClient () { @Override public RxCollectionCache getCollectionCache() { return RxDocumentClientImpl.this.collectionCache; } @Override public RxPartitionKeyRangeCache getPartitionKeyRangeCache() { return RxDocumentClientImpl.this.partitionKeyRangeCache; } @Override public IRetryPolicyFactory getResetSessionTokenRetryPolicy() { return RxDocumentClientImpl.this.resetSessionTokenRetryPolicy; } @Override public ConsistencyLevel getDefaultConsistencyLevelAsync() { return RxDocumentClientImpl.this.gatewayConfigurationReader.getDefaultConsistencyLevel(); } @Override public ConsistencyLevel getDesiredConsistencyLevelAsync() { return RxDocumentClientImpl.this.consistencyLevel; } @Override public Mono<RxDocumentServiceResponse> executeQueryAsync(RxDocumentServiceRequest request) { return RxDocumentClientImpl.this.query(request).single(); } @Override public QueryCompatibilityMode getQueryCompatibilityMode() { return QueryCompatibilityMode.Default; } @Override public Mono<RxDocumentServiceResponse> readFeedAsync(RxDocumentServiceRequest request) { return null; } }; } @Override public Flux<FeedResponse<Document>> queryDocuments(String collectionLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(collectionLink, querySpec, options, Document.class, ResourceType.Document); } @Override public Flux<FeedResponse<Document>> queryDocumentChangeFeed( final String collectionLink, final CosmosChangeFeedRequestOptions changeFeedOptions) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } ChangeFeedQueryImpl<Document> changeFeedQueryImpl = new ChangeFeedQueryImpl<>(this, ResourceType.Document, Document.class, collectionLink, changeFeedOptions); return changeFeedQueryImpl.executeAsync(); } @Override public Flux<FeedResponse<Document>> readAllDocuments( String collectionLink, PartitionKey partitionKey, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } if (partitionKey == null) { throw new IllegalArgumentException("partitionKey"); } RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Query, ResourceType.Document, collectionLink, null ); Flux<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null, request).flux(); return collectionObs.flatMap(documentCollectionResourceResponse -> { DocumentCollection collection = documentCollectionResourceResponse.v; if (collection == null) { throw new IllegalStateException("Collection cannot be null"); } PartitionKeyDefinition pkDefinition = collection.getPartitionKey(); String pkSelector = createPkSelector(pkDefinition); SqlQuerySpec querySpec = createLogicalPartitionScanQuerySpec(partitionKey, pkSelector); String resourceLink = 
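// readAllDocuments rewrites a logical-partition read as a SQL scan pinned to the single partition
// key range that owns the partition key's effective partition key string, resolved from the cached
// collection routing map below.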
parentResourceLinkToQueryLink(collectionLink, ResourceType.Document); UUID activityId = Utils.randomUUID(); IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this); final CosmosQueryRequestOptions effectiveOptions = ModelBridgeInternal.createQueryRequestOptions(options); InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy( this.collectionCache, null, resourceLink, effectiveOptions); return ObservableHelper.fluxInlineIfPossibleAsObs( () -> { Flux<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = this.partitionKeyRangeCache .tryLookupAsync( BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), collection.getResourceId(), null, null).flux(); return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> { CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v; if (routingMap == null) { throw new IllegalStateException("Failed to get routing map."); } String effectivePartitionKeyString = PartitionKeyInternalHelper .getEffectivePartitionKeyString( BridgeInternal.getPartitionKeyInternal(partitionKey), pkDefinition); PartitionKeyRange range = routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString); return createQueryInternal( resourceLink, querySpec, ModelBridgeInternal.partitionKeyRangeIdInternal(effectiveOptions, range.getId()), Document.class, ResourceType.Document, queryClient, activityId); }); }, invalidPartitionExceptionRetryPolicy); }); } @Override public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(final String collectionLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return readFeed(options, ResourceType.PartitionKeyRange, PartitionKeyRange.class, Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT)); } private RxDocumentServiceRequest getStoredProcedureRequest(String collectionLink, StoredProcedure storedProcedure, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } if (storedProcedure == null) { throw new IllegalArgumentException("storedProcedure"); } validateResource(storedProcedure); String path = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, operationType); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, operationType, ResourceType.StoredProcedure, path, storedProcedure, requestHeaders, options); return request; } private RxDocumentServiceRequest getUserDefinedFunctionRequest(String collectionLink, UserDefinedFunction udf, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } if (udf == null) { throw new IllegalArgumentException("udf"); } validateResource(udf); String path = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, operationType); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, operationType, ResourceType.UserDefinedFunction, path, udf, requestHeaders, options); return request; } @Override public Mono<ResourceResponse<StoredProcedure>> createStoredProcedure(String collectionLink, 
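/* The get*Request helpers above follow one pattern: validate the link and resource, join the
 * parent link with the child path segment, build the per-operation headers, and wrap it all
 * in an RxDocumentServiceRequest. For a hypothetical collection link dbs/db1/colls/coll1,
 * getStoredProcedureRequest targets dbs/db1/colls/coll1/sprocs and
 * getUserDefinedFunctionRequest targets dbs/db1/colls/coll1/udfs (segment names per the
 * Cosmos DB REST API). The create/upsert/replace/delete/read methods that follow differ only
 * in OperationType and in whether a retry policy hook is invoked before sending.
 */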
StoredProcedure storedProcedure, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createStoredProcedureInternal(collectionLink, storedProcedure, options, requestRetryPolicy), requestRetryPolicy); } private Mono<ResourceResponse<StoredProcedure>> createStoredProcedureInternal(String collectionLink, StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Creating a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]", collectionLink, storedProcedure.getId()); RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options, OperationType.Create); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in creating a StoredProcedure due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<StoredProcedure>> upsertStoredProcedure(String collectionLink, StoredProcedure storedProcedure, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertStoredProcedureInternal(collectionLink, storedProcedure, options, requestRetryPolicy), requestRetryPolicy); } private Mono<ResourceResponse<StoredProcedure>> upsertStoredProcedureInternal(String collectionLink, StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]", collectionLink, storedProcedure.getId()); RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options, OperationType.Upsert); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in upserting a StoredProcedure due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedure(StoredProcedure storedProcedure, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceStoredProcedureInternal(storedProcedure, options, requestRetryPolicy), requestRetryPolicy); } private Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedureInternal(StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (storedProcedure == null) { throw new IllegalArgumentException("storedProcedure"); } logger.debug("Replacing a StoredProcedure. 
storedProcedure id [{}]", storedProcedure.getId()); RxDocumentClientImpl.validateResource(storedProcedure); String path = Utils.joinPath(storedProcedure.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.StoredProcedure, path, storedProcedure, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in replacing a StoredProcedure due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedure(String storedProcedureLink, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteStoredProcedureInternal(storedProcedureLink, options, requestRetryPolicy), requestRetryPolicy); } private Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedureInternal(String storedProcedureLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(storedProcedureLink)) { throw new IllegalArgumentException("storedProcedureLink"); } logger.debug("Deleting a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink); String path = Utils.joinPath(storedProcedureLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.StoredProcedure, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in deleting a StoredProcedure due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<StoredProcedure>> readStoredProcedure(String storedProcedureLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readStoredProcedureInternal(storedProcedureLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<StoredProcedure>> readStoredProcedureInternal(String storedProcedureLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(storedProcedureLink)) { throw new IllegalArgumentException("storedProcedureLink"); } logger.debug("Reading a StoredProcedure. 
storedProcedureLink [{}]", storedProcedureLink); String path = Utils.joinPath(storedProcedureLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.StoredProcedure, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in reading a StoredProcedure due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<StoredProcedure>> readStoredProcedures(String collectionLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return readFeed(options, ResourceType.StoredProcedure, StoredProcedure.class, Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT)); } @Override public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, String query, CosmosQueryRequestOptions options) { return queryStoredProcedures(collectionLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(collectionLink, querySpec, options, StoredProcedure.class, ResourceType.StoredProcedure); } @Override public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink, List<Object> procedureParams) { return this.executeStoredProcedure(storedProcedureLink, null, procedureParams); } @Override public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink, RequestOptions options, List<Object> procedureParams) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> executeStoredProcedureInternal(storedProcedureLink, options, procedureParams, documentClientRetryPolicy), documentClientRetryPolicy); } @Override public Mono<TransactionalBatchResponse> executeBatchRequest(String collectionLink, ServerBatchRequest serverBatchRequest, RequestOptions options, boolean disableAutomaticIdGeneration) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> executeBatchRequestInternal(collectionLink, serverBatchRequest, options, documentClientRetryPolicy, disableAutomaticIdGeneration), documentClientRetryPolicy); } private Mono<StoredProcedureResponse> executeStoredProcedureInternal(String storedProcedureLink, RequestOptions options, List<Object> procedureParams, DocumentClientRetryPolicy retryPolicy) { try { logger.debug("Executing a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink); String path = Utils.joinPath(storedProcedureLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.ExecuteJavaScript); requestHeaders.put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.ExecuteJavaScript, ResourceType.StoredProcedure, path, procedureParams != null && !procedureParams.isEmpty() ? 
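/* executeStoredProcedureInternal serializes the parameter list into the request body
 * (presumably a JSON array -- e.g. ["a", 1] for hypothetical procedureParams of "a" and 1),
 * sends it as an ExecuteJavaScript operation, and captures the session token from the
 * response so that subsequent session-consistent reads observe the sproc's writes.
 */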
RxDocumentClientImpl.serializeProcedureParams(procedureParams) : "", requestHeaders, options); if (retryPolicy != null) { retryPolicy.onBeforeSendRequest(request); } Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options); return reqObs.flatMap(req -> create(request, retryPolicy) .map(response -> { this.captureSessionToken(request, response); return toStoredProcedureResponse(response); })); } catch (Exception e) { logger.debug("Failure in executing a StoredProcedure due to [{}]", e.getMessage(), e); return Mono.error(e); } } private Mono<TransactionalBatchResponse> executeBatchRequestInternal(String collectionLink, ServerBatchRequest serverBatchRequest, RequestOptions options, DocumentClientRetryPolicy requestRetryPolicy, boolean disableAutomaticIdGeneration) { try { logger.debug("Executing a Batch request with number of operations {}", serverBatchRequest.getOperations().size()); Mono<RxDocumentServiceRequest> requestObs = getBatchDocumentRequest(requestRetryPolicy, collectionLink, serverBatchRequest, options, disableAutomaticIdGeneration); Mono<RxDocumentServiceResponse> responseObservable = requestObs.flatMap(request -> create(request, requestRetryPolicy)); return responseObservable .map(serviceResponse -> BatchResponseParser.fromDocumentServiceResponse(serviceResponse, serverBatchRequest, true)); } catch (Exception ex) { logger.debug("Failure in executing a batch due to [{}]", ex.getMessage(), ex); return Mono.error(ex); } } @Override public Mono<ResourceResponse<Trigger>> createTrigger(String collectionLink, Trigger trigger, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createTriggerInternal(collectionLink, trigger, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Trigger>> createTriggerInternal(String collectionLink, Trigger trigger, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Creating a Trigger. collectionLink [{}], trigger id [{}]", collectionLink, trigger.getId()); RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options, OperationType.Create); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in creating a Trigger due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Trigger>> upsertTrigger(String collectionLink, Trigger trigger, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertTriggerInternal(collectionLink, trigger, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Trigger>> upsertTriggerInternal(String collectionLink, Trigger trigger, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a Trigger. 
collectionLink [{}], trigger id [{}]", collectionLink, trigger.getId()); RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options, OperationType.Upsert); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in upserting a Trigger due to [{}]", e.getMessage(), e); return Mono.error(e); } } private RxDocumentServiceRequest getTriggerRequest(String collectionLink, Trigger trigger, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } if (trigger == null) { throw new IllegalArgumentException("trigger"); } RxDocumentClientImpl.validateResource(trigger); String path = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, operationType); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, operationType, ResourceType.Trigger, path, trigger, requestHeaders, options); return request; } @Override public Mono<ResourceResponse<Trigger>> replaceTrigger(Trigger trigger, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceTriggerInternal(trigger, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Trigger>> replaceTriggerInternal(Trigger trigger, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (trigger == null) { throw new IllegalArgumentException("trigger"); } logger.debug("Replacing a Trigger. trigger id [{}]", trigger.getId()); RxDocumentClientImpl.validateResource(trigger); String path = Utils.joinPath(trigger.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.Trigger, path, trigger, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in replacing a Trigger due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Trigger>> deleteTrigger(String triggerLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteTriggerInternal(triggerLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Trigger>> deleteTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(triggerLink)) { throw new IllegalArgumentException("triggerLink"); } logger.debug("Deleting a Trigger. 
triggerLink [{}]", triggerLink); String path = Utils.joinPath(triggerLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.Trigger, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in deleting a Trigger due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Trigger>> readTrigger(String triggerLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readTriggerInternal(triggerLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Trigger>> readTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(triggerLink)) { throw new IllegalArgumentException("triggerLink"); } logger.debug("Reading a Trigger. triggerLink [{}]", triggerLink); String path = Utils.joinPath(triggerLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Trigger, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in reading a Trigger due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<Trigger>> readTriggers(String collectionLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return readFeed(options, ResourceType.Trigger, Trigger.class, Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, String query, CosmosQueryRequestOptions options) { return queryTriggers(collectionLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(collectionLink, querySpec, options, Trigger.class, ResourceType.Trigger); } @Override public Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunction(String collectionLink, UserDefinedFunction udf, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunctionInternal(String collectionLink, UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Creating a UserDefinedFunction. 
collectionLink [{}], udf id [{}]", collectionLink, udf.getId()); RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options, OperationType.Create); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in creating a UserDefinedFunction due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunction(String collectionLink, UserDefinedFunction udf, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunctionInternal(String collectionLink, UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink, udf.getId()); RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options, OperationType.Upsert); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in upserting a UserDefinedFunction due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunction(UserDefinedFunction udf, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceUserDefinedFunctionInternal(udf, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunctionInternal(UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (udf == null) { throw new IllegalArgumentException("udf"); } logger.debug("Replacing a UserDefinedFunction. 
udf id [{}]", udf.getId()); validateResource(udf); String path = Utils.joinPath(udf.getSelfLink(), null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.UserDefinedFunction, path, udf, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in replacing a UserDefinedFunction due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunction(String udfLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunctionInternal(String udfLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(udfLink)) { throw new IllegalArgumentException("udfLink"); } logger.debug("Deleting a UserDefinedFunction. udfLink [{}]", udfLink); String path = Utils.joinPath(udfLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.UserDefinedFunction, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in deleting a UserDefinedFunction due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunction(String udfLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunctionInternal(String udfLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(udfLink)) { throw new IllegalArgumentException("udfLink"); } logger.debug("Reading a UserDefinedFunction. 
udfLink [{}]", udfLink); String path = Utils.joinPath(udfLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.UserDefinedFunction, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in reading a UserDefinedFunction due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<UserDefinedFunction>> readUserDefinedFunctions(String collectionLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return readFeed(options, ResourceType.UserDefinedFunction, UserDefinedFunction.class, Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink, String query, CosmosQueryRequestOptions options) { return queryUserDefinedFunctions(collectionLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(collectionLink, querySpec, options, UserDefinedFunction.class, ResourceType.UserDefinedFunction); } @Override public Mono<ResourceResponse<Conflict>> readConflict(String conflictLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Conflict>> readConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(conflictLink)) { throw new IllegalArgumentException("conflictLink"); } logger.debug("Reading a Conflict. 
conflictLink [{}]", conflictLink); String path = Utils.joinPath(conflictLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Conflict, path, requestHeaders, options); Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options); return reqObs.flatMap(req -> { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class)); }); } catch (Exception e) { logger.debug("Failure in reading a Conflict due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<Conflict>> readConflicts(String collectionLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return readFeed(options, ResourceType.Conflict, Conflict.class, Utils.joinPath(collectionLink, Paths.CONFLICTS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, String query, CosmosQueryRequestOptions options) { return queryConflicts(collectionLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(collectionLink, querySpec, options, Conflict.class, ResourceType.Conflict); } @Override public Mono<ResourceResponse<Conflict>> deleteConflict(String conflictLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Conflict>> deleteConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(conflictLink)) { throw new IllegalArgumentException("conflictLink"); } logger.debug("Deleting a Conflict. 
conflictLink [{}]", conflictLink); String path = Utils.joinPath(conflictLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.Conflict, path, requestHeaders, options); Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options); return reqObs.flatMap(req -> { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class)); }); } catch (Exception e) { logger.debug("Failure in deleting a Conflict due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<User>> createUser(String databaseLink, User user, RequestOptions options) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createUserInternal(databaseLink, user, options, documentClientRetryPolicy), documentClientRetryPolicy); } private Mono<ResourceResponse<User>> createUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Creating a User. databaseLink [{}], user id [{}]", databaseLink, user.getId()); RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Create); return this.create(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in creating a User due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<User>> upsertUser(String databaseLink, User user, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertUserInternal(databaseLink, user, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<User>> upsertUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a User. 
databaseLink [{}], user id [{}]", databaseLink, user.getId()); RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Upsert); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in upserting a User due to [{}]", e.getMessage(), e); return Mono.error(e); } } private RxDocumentServiceRequest getUserRequest(String databaseLink, User user, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } if (user == null) { throw new IllegalArgumentException("user"); } RxDocumentClientImpl.validateResource(user); String path = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, operationType); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, operationType, ResourceType.User, path, user, requestHeaders, options); return request; } @Override public Mono<ResourceResponse<User>> replaceUser(User user, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceUserInternal(user, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<User>> replaceUserInternal(User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (user == null) { throw new IllegalArgumentException("user"); } logger.debug("Replacing a User. user id [{}]", user.getId()); RxDocumentClientImpl.validateResource(user); String path = Utils.joinPath(user.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.User, path, user, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in replacing a User due to [{}]", e.getMessage(), e); return Mono.error(e); } } public Mono<ResourceResponse<User>> deleteUser(String userLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<User>> deleteUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } logger.debug("Deleting a User. 
userLink [{}]", userLink); String path = Utils.joinPath(userLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.User, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in deleting a User due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<User>> readUser(String userLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<User>> readUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } logger.debug("Reading a User. userLink [{}]", userLink); String path = Utils.joinPath(userLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.User, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in reading a User due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<User>> readUsers(String databaseLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } return readFeed(options, ResourceType.User, User.class, Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<User>> queryUsers(String databaseLink, String query, CosmosQueryRequestOptions options) { return queryUsers(databaseLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<User>> queryUsers(String databaseLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(databaseLink, querySpec, options, User.class, ResourceType.User); } @Override public Mono<ResourceResponse<Permission>> createPermission(String userLink, Permission permission, RequestOptions options) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createPermissionInternal(userLink, permission, options, documentClientRetryPolicy), this.resetSessionTokenRetryPolicy.getRequestPolicy()); } private Mono<ResourceResponse<Permission>> createPermissionInternal(String userLink, Permission permission, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Creating a Permission. 
userLink [{}], permission id [{}]", userLink, permission.getId()); RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options, OperationType.Create); return this.create(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in creating a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Permission>> upsertPermission(String userLink, Permission permission, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertPermissionInternal(userLink, permission, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Permission>> upsertPermissionInternal(String userLink, Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a Permission. userLink [{}], permission id [{}]", userLink, permission.getId()); RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options, OperationType.Upsert); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in upserting a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } private RxDocumentServiceRequest getPermissionRequest(String userLink, Permission permission, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } if (permission == null) { throw new IllegalArgumentException("permission"); } RxDocumentClientImpl.validateResource(permission); String path = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, operationType); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, operationType, ResourceType.Permission, path, permission, requestHeaders, options); return request; } @Override public Mono<ResourceResponse<Permission>> replacePermission(Permission permission, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replacePermissionInternal(permission, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Permission>> replacePermissionInternal(Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (permission == null) { throw new IllegalArgumentException("permission"); } logger.debug("Replacing a Permission. 
permission id [{}]", permission.getId()); RxDocumentClientImpl.validateResource(permission); String path = Utils.joinPath(permission.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.Permission, path, permission, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in replacing a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Permission>> deletePermission(String permissionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deletePermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Permission>> deletePermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(permissionLink)) { throw new IllegalArgumentException("permissionLink"); } logger.debug("Deleting a Permission. permissionLink [{}]", permissionLink); String path = Utils.joinPath(permissionLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.Permission, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in deleting a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Permission>> readPermission(String permissionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readPermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Permission>> readPermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance ) { try { if (StringUtils.isEmpty(permissionLink)) { throw new IllegalArgumentException("permissionLink"); } logger.debug("Reading a Permission. 
permissionLink [{}]", permissionLink); String path = Utils.joinPath(permissionLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Permission, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in reading a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<Permission>> readPermissions(String userLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } return readFeed(options, ResourceType.Permission, Permission.class, Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<Permission>> queryPermissions(String userLink, String query, CosmosQueryRequestOptions options) { return queryPermissions(userLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<Permission>> queryPermissions(String userLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(userLink, querySpec, options, Permission.class, ResourceType.Permission); } @Override public Mono<ResourceResponse<Offer>> replaceOffer(Offer offer) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceOfferInternal(offer, documentClientRetryPolicy), documentClientRetryPolicy); } private Mono<ResourceResponse<Offer>> replaceOfferInternal(Offer offer, DocumentClientRetryPolicy documentClientRetryPolicy) { try { if (offer == null) { throw new IllegalArgumentException("offer"); } logger.debug("Replacing an Offer. offer id [{}]", offer.getId()); RxDocumentClientImpl.validateResource(offer); String path = Utils.joinPath(offer.getSelfLink(), null); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.Offer, path, offer, null, null); return this.replace(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Offer.class)); } catch (Exception e) { logger.debug("Failure in replacing an Offer due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Offer>> readOffer(String offerLink) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readOfferInternal(offerLink, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Offer>> readOfferInternal(String offerLink, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(offerLink)) { throw new IllegalArgumentException("offerLink"); } logger.debug("Reading an Offer. 
offerLink [{}]", offerLink); String path = Utils.joinPath(offerLink, null); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Offer, path, (HashMap<String, String>)null, null); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Offer.class)); } catch (Exception e) { logger.debug("Failure in reading an Offer due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<Offer>> readOffers(CosmosQueryRequestOptions options) { return readFeed(options, ResourceType.Offer, Offer.class, Utils.joinPath(Paths.OFFERS_PATH_SEGMENT, null)); } private <T extends Resource> Flux<FeedResponse<T>> readFeed(CosmosQueryRequestOptions options, ResourceType resourceType, Class<T> klass, String resourceLink) { if (options == null) { options = new CosmosQueryRequestOptions(); } Integer maxItemCount = ModelBridgeInternal.getMaxItemCountFromQueryRequestOptions(options); int maxPageSize = maxItemCount != null ? maxItemCount : -1; final CosmosQueryRequestOptions finalCosmosQueryRequestOptions = options; BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> { Map<String, String> requestHeaders = new HashMap<>(); if (continuationToken != null) { requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken); } requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize)); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.ReadFeed, resourceType, resourceLink, requestHeaders, finalCosmosQueryRequestOptions); return request; }; Function<RxDocumentServiceRequest, Mono<FeedResponse<T>>> executeFunc = request -> ObservableHelper .inlineIfPossibleAsObs(() -> readFeed(request).map(response -> toFeedResponsePage(response, klass)), this.resetSessionTokenRetryPolicy.getRequestPolicy()); return Paginator.getPaginatedQueryResultAsObservable(options, createRequestFunc, executeFunc, klass, maxPageSize); } @Override public Flux<FeedResponse<Offer>> queryOffers(String query, CosmosQueryRequestOptions options) { return queryOffers(new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<Offer>> queryOffers(SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(null, querySpec, options, Offer.class, ResourceType.Offer); } @Override public Mono<DatabaseAccount> getDatabaseAccount() { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> getDatabaseAccountInternal(documentClientRetryPolicy), documentClientRetryPolicy); } @Override public DatabaseAccount getLatestDatabaseAccount() { return this.globalEndpointManager.getLatestDatabaseAccount(); } private Mono<DatabaseAccount> getDatabaseAccountInternal(DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Getting Database Account"); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.DatabaseAccount, "", (HashMap<String, String>) null, null); return this.read(request, documentClientRetryPolicy).map(ModelBridgeInternal::toDatabaseAccount); } catch (Exception e) { logger.debug("Failure in getting Database Account due to [{}]", e.getMessage(), e); return Mono.error(e); } } public Object getSession() { return this.sessionContainer; } public 
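/* The generic readFeed above drives server-side paging: Paginator repeatedly invokes
 * createRequestFunc, threading the continuation token and page-size headers into each
 * ReadFeed request until the server stops returning a continuation. A minimal consumption
 * sketch (hypothetical `client` variable, using only APIs defined in this class):
 *
 *   client.readOffers(new CosmosQueryRequestOptions())        // Flux<FeedResponse<Offer>>
 *         .flatMapIterable(FeedResponse::getResults)          // flatten pages into items
 *         .subscribe(offer -> System.out.println(offer.getId()));
 */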
void setSession(Object sessionContainer) { this.sessionContainer = (SessionContainer) sessionContainer; } @Override public RxClientCollectionCache getCollectionCache() { return this.collectionCache; } @Override public RxPartitionKeyRangeCache getPartitionKeyRangeCache() { return partitionKeyRangeCache; } public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) { return Flux.defer(() -> { RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.DatabaseAccount, "", null, (Object) null); return this.populateHeaders(request, RequestVerb.GET) .flatMap(requestPopulated -> { requestPopulated.setEndpointOverride(endpoint); return this.gatewayProxy.processMessage(requestPopulated).doOnError(e -> { String message = String.format("Failed to retrieve database account information. %s", e.getCause() != null ? e.getCause().toString() : e.toString()); logger.warn(message); }).map(rsp -> rsp.getResource(DatabaseAccount.class)) .doOnNext(databaseAccount -> this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount)); }); }); } /** * Certain requests must be routed through gateway even when the client connectivity mode is direct. * * @param request * @return RxStoreModel */ private RxStoreModel getStoreProxy(RxDocumentServiceRequest request) { if (request.UseGatewayMode) { return this.gatewayProxy; } ResourceType resourceType = request.getResourceType(); OperationType operationType = request.getOperationType(); if (resourceType == ResourceType.Offer || resourceType.isScript() && operationType != OperationType.ExecuteJavaScript || resourceType == ResourceType.PartitionKeyRange) { return this.gatewayProxy; } if (operationType == OperationType.Create || operationType == OperationType.Upsert) { if (resourceType == ResourceType.Database || resourceType == ResourceType.User || resourceType == ResourceType.DocumentCollection || resourceType == ResourceType.Permission) { return this.gatewayProxy; } else { return this.storeModel; } } else if (operationType == OperationType.Delete) { if (resourceType == ResourceType.Database || resourceType == ResourceType.User || resourceType == ResourceType.DocumentCollection) { return this.gatewayProxy; } else { return this.storeModel; } } else if (operationType == OperationType.Replace) { if (resourceType == ResourceType.DocumentCollection) { return this.gatewayProxy; } else { return this.storeModel; } } else if (operationType == OperationType.Read) { if (resourceType == ResourceType.DocumentCollection) { return this.gatewayProxy; } else { return this.storeModel; } } else { if ((request.getOperationType() == OperationType.Query || request.getOperationType() == OperationType.SqlQuery) && Utils.isCollectionChild(request.getResourceType())) { if (request.getPartitionKeyRangeIdentity() == null && request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY) == null) { return this.gatewayProxy; } } return this.storeModel; } } @Override public void close() { logger.info("Attempting to close client {}", this.clientId); if (!closed.getAndSet(true)) { logger.info("Shutting down ..."); logger.info("Closing Global Endpoint Manager ..."); LifeCycleUtils.closeQuietly(this.globalEndpointManager); logger.info("Closing StoreClientFactory ..."); LifeCycleUtils.closeQuietly(this.storeClientFactory); logger.info("Shutting down reactorHttpClient ..."); LifeCycleUtils.closeQuietly(this.reactorHttpClient); logger.info("Shutting down 
CpuMonitor ..."); CpuMemoryMonitor.unregister(this); logger.info("Shutting down completed."); } else { logger.warn("Already shutdown!"); } } @Override public ItemDeserializer getItemDeserializer() { return this.itemDeserializer; } private static SqlQuerySpec createLogicalPartitionScanQuerySpec( PartitionKey partitionKey, String partitionKeySelector) { StringBuilder queryStringBuilder = new StringBuilder(); List<SqlParameter> parameters = new ArrayList<>(); queryStringBuilder.append("SELECT * FROM c WHERE"); Object pkValue = ModelBridgeInternal.getPartitionKeyObject(partitionKey); String pkParamName = "@pkValue"; parameters.add(new SqlParameter(pkParamName, pkValue)); queryStringBuilder.append(" c"); queryStringBuilder.append(partitionKeySelector); queryStringBuilder.append((" = ")); queryStringBuilder.append(pkParamName); return new SqlQuerySpec(queryStringBuilder.toString(), parameters); } @Override public Mono<List<FeedRange>> getFeedRanges(String collectionLink) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } RxDocumentServiceRequest request = RxDocumentServiceRequest.create( this, OperationType.Query, ResourceType.Document, collectionLink, null); Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null, request); return collectionObs.flatMap(documentCollectionResourceResponse -> { final DocumentCollection collection = documentCollectionResourceResponse.v; if (collection == null) { throw new IllegalStateException("Collection cannot be null"); } Mono<Utils.ValueHolder<List<PartitionKeyRange>>> valueHolderMono = partitionKeyRangeCache .tryGetOverlappingRangesAsync( BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), collection.getResourceId(), RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES, true, null); return valueHolderMono.map(RxDocumentClientImpl::toFeedRanges); }); } private static List<FeedRange> toFeedRanges( Utils.ValueHolder<List<PartitionKeyRange>> partitionKeyRangeListValueHolder) { final List<PartitionKeyRange> partitionKeyRangeList = partitionKeyRangeListValueHolder.v; if (partitionKeyRangeList == null) { throw new IllegalStateException("PartitionKeyRange list cannot be null"); } List<FeedRange> feedRanges = new ArrayList<>(); partitionKeyRangeList.forEach(pkRange -> feedRanges.add(toFeedRange(pkRange))); return feedRanges; } private static FeedRange toFeedRange(PartitionKeyRange pkRange) { return new FeedRangePartitionKeyRangeImpl(pkRange.getId()); } }
class RxDocumentClientImpl implements AsyncDocumentClient, IAuthorizationTokenProvider, CpuMemoryListener, DiagnosticsClientContext { private static final AtomicInteger activeClientsCnt = new AtomicInteger(0); private static final AtomicInteger clientIdGenerator = new AtomicInteger(0); private static final Range<String> RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES = new Range<>( PartitionKeyInternalHelper.MinimumInclusiveEffectivePartitionKey, PartitionKeyInternalHelper.MaximumExclusiveEffectivePartitionKey, true, false); private static final String DUMMY_SQL_QUERY = "this is dummy and only used in creating " + "ParallelDocumentQueryExecutioncontext, but not used"; private final static ObjectMapper mapper = Utils.getSimpleObjectMapper(); private final ItemDeserializer itemDeserializer = new ItemDeserializer.JsonDeserializer(); private final Logger logger = LoggerFactory.getLogger(RxDocumentClientImpl.class); private final String masterKeyOrResourceToken; private final URI serviceEndpoint; private final ConnectionPolicy connectionPolicy; private final ConsistencyLevel consistencyLevel; private final BaseAuthorizationTokenProvider authorizationTokenProvider; private final UserAgentContainer userAgentContainer; private final boolean hasAuthKeyResourceToken; private final Configs configs; private final boolean connectionSharingAcrossClientsEnabled; private AzureKeyCredential credential; private final TokenCredential tokenCredential; private String[] tokenCredentialScopes; private SimpleTokenCache tokenCredentialCache; private CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver; AuthorizationTokenType authorizationTokenType; private SessionContainer sessionContainer; private String firstResourceTokenFromPermissionFeed = StringUtils.EMPTY; private RxClientCollectionCache collectionCache; private RxStoreModel gatewayProxy; private RxStoreModel storeModel; private GlobalAddressResolver addressResolver; private RxPartitionKeyRangeCache partitionKeyRangeCache; private Map<String, List<PartitionKeyAndResourceTokenPair>> resourceTokensMap; private final boolean contentResponseOnWriteEnabled; private ConcurrentMap<String, PartitionedQueryExecutionInfo> queryPlanCache; private final AtomicBoolean closed = new AtomicBoolean(false); private final int clientId; private ClientTelemetry clientTelemetry; private IRetryPolicyFactory resetSessionTokenRetryPolicy; /** * Compatibility mode: Allows to specify compatibility mode used by client when * making query requests. Should be removed when application/sql is no longer * supported. 
*/ private final QueryCompatibilityMode queryCompatibilityMode = QueryCompatibilityMode.Default; private final HttpClient reactorHttpClient; private final GlobalEndpointManager globalEndpointManager; private final RetryPolicy retryPolicy; private volatile boolean useMultipleWriteLocations; private StoreClientFactory storeClientFactory; private GatewayServiceConfigurationReader gatewayConfigurationReader; private final DiagnosticsClientConfig diagnosticsClientConfig; public RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, List<Permission> permissionFeed, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver, AzureKeyCredential credential, boolean sessionCapturingOverride, boolean connectionSharingAcrossClientsEnabled, boolean contentResponseOnWriteEnabled) { this(serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, consistencyLevel, configs, credential, null, sessionCapturingOverride, connectionSharingAcrossClientsEnabled, contentResponseOnWriteEnabled); this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver; } public RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, List<Permission> permissionFeed, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver, AzureKeyCredential credential, TokenCredential tokenCredential, boolean sessionCapturingOverride, boolean connectionSharingAcrossClientsEnabled, boolean contentResponseOnWriteEnabled) { this(serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, consistencyLevel, configs, credential, tokenCredential, sessionCapturingOverride, connectionSharingAcrossClientsEnabled, contentResponseOnWriteEnabled); this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver; } private RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, List<Permission> permissionFeed, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, AzureKeyCredential credential, TokenCredential tokenCredential, boolean sessionCapturingOverrideEnabled, boolean connectionSharingAcrossClientsEnabled, boolean contentResponseOnWriteEnabled) { this(serviceEndpoint, masterKeyOrResourceToken, connectionPolicy, consistencyLevel, configs, credential, tokenCredential, sessionCapturingOverrideEnabled, connectionSharingAcrossClientsEnabled, contentResponseOnWriteEnabled); if (permissionFeed != null && permissionFeed.size() > 0) { this.resourceTokensMap = new HashMap<>(); for (Permission permission : permissionFeed) { String[] segments = StringUtils.split(permission.getResourceLink(), Constants.Properties.PATH_SEPARATOR.charAt(0)); if (segments.length <= 0) { throw new IllegalArgumentException("resourceLink"); } List<PartitionKeyAndResourceTokenPair> partitionKeyAndResourceTokenPairs = null; PathInfo pathInfo = new PathInfo(false, StringUtils.EMPTY, StringUtils.EMPTY, false); if (!PathsHelper.tryParsePathSegments(permission.getResourceLink(), pathInfo, null)) { throw new IllegalArgumentException(permission.getResourceLink()); } partitionKeyAndResourceTokenPairs = resourceTokensMap.get(pathInfo.resourceIdOrFullName); if (partitionKeyAndResourceTokenPairs == null) { partitionKeyAndResourceTokenPairs = new ArrayList<>(); this.resourceTokensMap.put(pathInfo.resourceIdOrFullName, partitionKeyAndResourceTokenPairs); } PartitionKey 
partitionKey = permission.getResourcePartitionKey(); partitionKeyAndResourceTokenPairs.add(new PartitionKeyAndResourceTokenPair( partitionKey != null ? BridgeInternal.getPartitionKeyInternal(partitionKey) : PartitionKeyInternal.Empty, permission.getToken())); logger.debug("Initializing resource token map, with map key [{}], partition key [{}] and resource token [{}]", pathInfo.resourceIdOrFullName, partitionKey != null ? partitionKey.toString() : null, permission.getToken()); } if(this.resourceTokensMap.isEmpty()) { throw new IllegalArgumentException("permissionFeed"); } String firstToken = permissionFeed.get(0).getToken(); if(ResourceTokenAuthorizationHelper.isResourceToken(firstToken)) { this.firstResourceTokenFromPermissionFeed = firstToken; } } } RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, AzureKeyCredential credential, TokenCredential tokenCredential, boolean sessionCapturingOverrideEnabled, boolean connectionSharingAcrossClientsEnabled, boolean contentResponseOnWriteEnabled) { activeClientsCnt.incrementAndGet(); this.clientId = clientIdGenerator.getAndDecrement(); this.diagnosticsClientConfig = new DiagnosticsClientConfig(); this.diagnosticsClientConfig.withClientId(this.clientId); this.diagnosticsClientConfig.withActiveClientCounter(activeClientsCnt); this.diagnosticsClientConfig.withConnectionSharingAcrossClientsEnabled(connectionSharingAcrossClientsEnabled); this.diagnosticsClientConfig.withConsistency(consistencyLevel); logger.info( "Initializing DocumentClient [{}] with" + " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}], directModeProtocol [{}]", this.clientId, serviceEndpoint, connectionPolicy, consistencyLevel, configs.getProtocol()); try { this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled; this.configs = configs; this.masterKeyOrResourceToken = masterKeyOrResourceToken; this.serviceEndpoint = serviceEndpoint; this.credential = credential; this.tokenCredential = tokenCredential; this.contentResponseOnWriteEnabled = contentResponseOnWriteEnabled; this.authorizationTokenType = AuthorizationTokenType.Invalid; if (this.credential != null) { hasAuthKeyResourceToken = false; this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential); } else if (masterKeyOrResourceToken != null && ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) { this.authorizationTokenProvider = null; hasAuthKeyResourceToken = true; this.authorizationTokenType = AuthorizationTokenType.ResourceToken; } else if(masterKeyOrResourceToken != null && !ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) { this.credential = new AzureKeyCredential(this.masterKeyOrResourceToken); hasAuthKeyResourceToken = false; this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey; this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential); } else { hasAuthKeyResourceToken = false; this.authorizationTokenProvider = null; if (tokenCredential != null) { this.tokenCredentialScopes = new String[] { serviceEndpoint.getScheme() + "://" + serviceEndpoint.getHost() + "/.default" }; this.tokenCredentialCache = new SimpleTokenCache(() -> this.tokenCredential .getToken(new TokenRequestContext().addScopes(this.tokenCredentialScopes))); this.authorizationTokenType = AuthorizationTokenType.AadToken; } } if (connectionPolicy != null) { this.connectionPolicy = connectionPolicy; } else { this.connectionPolicy =
new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig()); } this.diagnosticsClientConfig.withMultipleWriteRegionsEnabled(this.connectionPolicy.isMultipleWriteRegionsEnabled()); this.diagnosticsClientConfig.withEndpointDiscoveryEnabled(this.connectionPolicy.isEndpointDiscoveryEnabled()); this.diagnosticsClientConfig.withPreferredRegions(this.connectionPolicy.getPreferredRegions()); boolean disableSessionCapturing = (ConsistencyLevel.SESSION != consistencyLevel && !sessionCapturingOverrideEnabled); this.sessionContainer = new SessionContainer(this.serviceEndpoint.getHost(), disableSessionCapturing); this.consistencyLevel = consistencyLevel; this.userAgentContainer = new UserAgentContainer(); String userAgentSuffix = this.connectionPolicy.getUserAgentSuffix(); if (userAgentSuffix != null && userAgentSuffix.length() > 0) { userAgentContainer.setSuffix(userAgentSuffix); } this.reactorHttpClient = httpClient(); this.globalEndpointManager = new GlobalEndpointManager(asDatabaseAccountManagerInternal(), this.connectionPolicy, /**/configs); this.retryPolicy = new RetryPolicy(this, this.globalEndpointManager, this.connectionPolicy); this.resetSessionTokenRetryPolicy = retryPolicy; CpuMemoryMonitor.register(this); this.queryPlanCache = new ConcurrentHashMap<>(); } catch (RuntimeException e) { logger.error("unexpected failure in initializing client.", e); close(); throw e; } } @Override public DiagnosticsClientConfig getConfig() { return diagnosticsClientConfig; } @Override public CosmosDiagnostics createDiagnostics() { return BridgeInternal.createCosmosDiagnostics(this); } private void initializeGatewayConfigurationReader() { this.gatewayConfigurationReader = new GatewayServiceConfigurationReader(this.globalEndpointManager); DatabaseAccount databaseAccount = this.globalEndpointManager.getLatestDatabaseAccount(); if (databaseAccount == null) { logger.error("Client initialization failed." + " Check if the endpoint is reachable and if your auth token is valid"); throw new RuntimeException("Client initialization failed." 
+ " Check if the endpoint is reachable and if your auth token is valid"); } this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount); } public void init() { try { this.gatewayProxy = createRxGatewayProxy(this.sessionContainer, this.consistencyLevel, this.queryCompatibilityMode, this.userAgentContainer, this.globalEndpointManager, this.reactorHttpClient); this.globalEndpointManager.init(); this.initializeGatewayConfigurationReader(); this.collectionCache = new RxClientCollectionCache(this, this.sessionContainer, this.gatewayProxy, this, this.retryPolicy); this.resetSessionTokenRetryPolicy = new ResetSessionTokenRetryPolicyFactory(this.sessionContainer, this.collectionCache, this.retryPolicy); this.partitionKeyRangeCache = new RxPartitionKeyRangeCache(RxDocumentClientImpl.this, collectionCache); if (this.connectionPolicy.getConnectionMode() == ConnectionMode.GATEWAY) { this.storeModel = this.gatewayProxy; } else { this.initializeDirectConnectivity(); } clientTelemetry = new ClientTelemetry(null, UUID.randomUUID().toString(), ManagementFactory.getRuntimeMXBean().getName(), userAgentContainer.getUserAgent(), connectionPolicy.getConnectionMode(), globalEndpointManager.getLatestDatabaseAccount().getId(), null, null, httpClient(), connectionPolicy.isClientTelemetryEnabled()); clientTelemetry.init(); this.queryPlanCache = new ConcurrentHashMap<>(); } catch (Exception e) { logger.error("unexpected failure in initializing client.", e); close(); throw e; } } private void initializeDirectConnectivity() { this.addressResolver = new GlobalAddressResolver(this, this.reactorHttpClient, this.globalEndpointManager, this.configs.getProtocol(), this, this.collectionCache, this.partitionKeyRangeCache, userAgentContainer, null, this.connectionPolicy); this.storeClientFactory = new StoreClientFactory( this.addressResolver, this.diagnosticsClientConfig, this.configs, this.connectionPolicy, this.userAgentContainer, this.connectionSharingAcrossClientsEnabled ); this.createStoreModel(true); } DatabaseAccountManagerInternal asDatabaseAccountManagerInternal() { return new DatabaseAccountManagerInternal() { @Override public URI getServiceEndpoint() { return RxDocumentClientImpl.this.getServiceEndpoint(); } @Override public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) { logger.info("Getting database account endpoint from {}", endpoint); return RxDocumentClientImpl.this.getDatabaseAccountFromEndpoint(endpoint); } @Override public ConnectionPolicy getConnectionPolicy() { return RxDocumentClientImpl.this.getConnectionPolicy(); } }; } RxGatewayStoreModel createRxGatewayProxy(ISessionContainer sessionContainer, ConsistencyLevel consistencyLevel, QueryCompatibilityMode queryCompatibilityMode, UserAgentContainer userAgentContainer, GlobalEndpointManager globalEndpointManager, HttpClient httpClient) { return new RxGatewayStoreModel( this, sessionContainer, consistencyLevel, queryCompatibilityMode, userAgentContainer, globalEndpointManager, httpClient); } private HttpClient httpClient() { HttpClientConfig httpClientConfig = new HttpClientConfig(this.configs) .withMaxIdleConnectionTimeout(this.connectionPolicy.getIdleHttpConnectionTimeout()) .withPoolSize(this.connectionPolicy.getMaxConnectionPoolSize()) .withProxy(this.connectionPolicy.getProxy()) .withRequestTimeout(this.connectionPolicy.getRequestTimeout()); if (connectionSharingAcrossClientsEnabled) { return 
SharedGatewayHttpClient.getOrCreateInstance(httpClientConfig, diagnosticsClientConfig); } else { diagnosticsClientConfig.withGatewayHttpClientConfig(httpClientConfig); return HttpClient.createFixed(httpClientConfig); } } private void createStoreModel(boolean subscribeRntbdStatus) { StoreClient storeClient = this.storeClientFactory.createStoreClient(this, this.addressResolver, this.sessionContainer, this.gatewayConfigurationReader, this, false ); this.storeModel = new ServerStoreModel(storeClient); } @Override public URI getServiceEndpoint() { return this.serviceEndpoint; } @Override public URI getWriteEndpoint() { return globalEndpointManager.getWriteEndpoints().stream().findFirst().orElse(null); } @Override public URI getReadEndpoint() { return globalEndpointManager.getReadEndpoints().stream().findFirst().orElse(null); } @Override public ConnectionPolicy getConnectionPolicy() { return this.connectionPolicy; } @Override public boolean isContentResponseOnWriteEnabled() { return contentResponseOnWriteEnabled; } @Override public ConsistencyLevel getConsistencyLevel() { return consistencyLevel; } @Override public ClientTelemetry getClientTelemetry() { return this.clientTelemetry; } @Override public Mono<ResourceResponse<Database>> createDatabase(Database database, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createDatabaseInternal(database, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Database>> createDatabaseInternal(Database database, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (database == null) { throw new IllegalArgumentException("Database"); } logger.debug("Creating a Database. id: [{}]", database.getId()); validateResource(database); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Create); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(database); Instant serializationEndTimeUTC = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTimeUTC, SerializationDiagnosticsContext.SerializationType.DATABASE_SERIALIZATION); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Create, ResourceType.Database, Paths.DATABASES_ROOT, byteBuffer, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class)); } catch (Exception e) { logger.debug("Failure in creating a database. 
due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Database>> deleteDatabase(String databaseLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Database>> deleteDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } logger.debug("Deleting a Database. databaseLink: [{}]", databaseLink); String path = Utils.joinPath(databaseLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.Database, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class)); } catch (Exception e) { logger.debug("Failure in deleting a database. due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Database>> readDatabase(String databaseLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Database>> readDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } logger.debug("Reading a Database. databaseLink: [{}]", databaseLink); String path = Utils.joinPath(databaseLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Database, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class)); } catch (Exception e) { logger.debug("Failure in reading a database. 
due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<Database>> readDatabases(CosmosQueryRequestOptions options) { return readFeed(options, ResourceType.Database, Database.class, Paths.DATABASES_ROOT); } private String parentResourceLinkToQueryLink(String parentResourceLink, ResourceType resourceTypeEnum) { switch (resourceTypeEnum) { case Database: return Paths.DATABASES_ROOT; case DocumentCollection: return Utils.joinPath(parentResourceLink, Paths.COLLECTIONS_PATH_SEGMENT); case Document: return Utils.joinPath(parentResourceLink, Paths.DOCUMENTS_PATH_SEGMENT); case Offer: return Paths.OFFERS_ROOT; case User: return Utils.joinPath(parentResourceLink, Paths.USERS_PATH_SEGMENT); case Permission: return Utils.joinPath(parentResourceLink, Paths.PERMISSIONS_PATH_SEGMENT); case Attachment: return Utils.joinPath(parentResourceLink, Paths.ATTACHMENTS_PATH_SEGMENT); case StoredProcedure: return Utils.joinPath(parentResourceLink, Paths.STORED_PROCEDURES_PATH_SEGMENT); case Trigger: return Utils.joinPath(parentResourceLink, Paths.TRIGGERS_PATH_SEGMENT); case UserDefinedFunction: return Utils.joinPath(parentResourceLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT); case Conflict: return Utils.joinPath(parentResourceLink, Paths.CONFLICTS_PATH_SEGMENT); default: throw new IllegalArgumentException("resource type not supported"); } } private <T extends Resource> Flux<FeedResponse<T>> createQuery( String parentResourceLink, SqlQuerySpec sqlQuery, CosmosQueryRequestOptions options, Class<T> klass, ResourceType resourceTypeEnum) { String resourceLink = parentResourceLinkToQueryLink(parentResourceLink, resourceTypeEnum); UUID activityId = Utils.randomUUID(); IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this); InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy( this.collectionCache, null, resourceLink, options); return ObservableHelper.fluxInlineIfPossibleAsObs( () -> createQueryInternal(resourceLink, sqlQuery, options, klass, resourceTypeEnum, queryClient, activityId), invalidPartitionExceptionRetryPolicy); } private <T extends Resource> Flux<FeedResponse<T>> createQueryInternal( String resourceLink, SqlQuerySpec sqlQuery, CosmosQueryRequestOptions options, Class<T> klass, ResourceType resourceTypeEnum, IDocumentQueryClient queryClient, UUID activityId) { Flux<? 
extends IDocumentQueryExecutionContext<T>> executionContext = DocumentQueryExecutionContextFactory .createDocumentQueryExecutionContextAsync(this, queryClient, resourceTypeEnum, klass, sqlQuery, options, resourceLink, false, activityId, Configs.isQueryPlanCachingEnabled(), queryPlanCache); AtomicBoolean isFirstResponse = new AtomicBoolean(true); return executionContext.flatMap(iDocumentQueryExecutionContext -> { QueryInfo queryInfo = null; if (iDocumentQueryExecutionContext instanceof PipelinedDocumentQueryExecutionContext) { queryInfo = ((PipelinedDocumentQueryExecutionContext<T>) iDocumentQueryExecutionContext).getQueryInfo(); } QueryInfo finalQueryInfo = queryInfo; return iDocumentQueryExecutionContext.executeAsync() .map(tFeedResponse -> { if (finalQueryInfo != null) { if (finalQueryInfo.hasSelectValue()) { ModelBridgeInternal .addQueryInfoToFeedResponse(tFeedResponse, finalQueryInfo); } if (isFirstResponse.compareAndSet(true, false)) { ModelBridgeInternal.addQueryPlanDiagnosticsContextToFeedResponse(tFeedResponse, finalQueryInfo.getQueryPlanDiagnosticsContext()); } } return tFeedResponse; }); }); } @Override public Flux<FeedResponse<Database>> queryDatabases(String query, CosmosQueryRequestOptions options) { return queryDatabases(new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<Database>> queryDatabases(SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(Paths.DATABASES_ROOT, querySpec, options, Database.class, ResourceType.Database); } @Override public Mono<ResourceResponse<DocumentCollection>> createCollection(String databaseLink, DocumentCollection collection, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> this.createCollectionInternal(databaseLink, collection, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<DocumentCollection>> createCollectionInternal(String databaseLink, DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } if (collection == null) { throw new IllegalArgumentException("collection"); } logger.debug("Creating a Collection. 
databaseLink: [{}], Collection id: [{}]", databaseLink, collection.getId()); validateResource(collection); String path = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Create); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection); Instant serializationEndTimeUTC = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTimeUTC, SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Create, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class)) .doOnNext(resourceResponse -> { this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(), getAltLink(resourceResponse.getResource()), resourceResponse.getResponseHeaders()); }); } catch (Exception e) { logger.debug("Failure in creating a collection. due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<DocumentCollection>> replaceCollection(DocumentCollection collection, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceCollectionInternal(collection, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<DocumentCollection>> replaceCollectionInternal(DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (collection == null) { throw new IllegalArgumentException("collection"); } logger.debug("Replacing a Collection. 
id: [{}]", collection.getId()); validateResource(collection); String path = Utils.joinPath(collection.getSelfLink(), null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Replace); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection); Instant serializationEndTimeUTC = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTimeUTC, SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class)) .doOnNext(resourceResponse -> { if (resourceResponse.getResource() != null) { this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(), getAltLink(resourceResponse.getResource()), resourceResponse.getResponseHeaders()); } }); } catch (Exception e) { logger.debug("Failure in replacing a collection. due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<DocumentCollection>> deleteCollection(String collectionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<DocumentCollection>> deleteCollectionInternal(String collectionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } logger.debug("Deleting a Collection. 
collectionLink: [{}]", collectionLink); String path = Utils.joinPath(collectionLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.DocumentCollection, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class)); } catch (Exception e) { logger.debug("Failure in deleting a collection, due to [{}]", e.getMessage(), e); return Mono.error(e); } } private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) { return populateHeaders(request, RequestVerb.DELETE) .flatMap(requestPopulated -> { if (requestPopulated.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) { documentClientRetryPolicy.updateEndTime(); requestPopulated.requestContext.updateRetryContext(documentClientRetryPolicy, true); } return getStoreProxy(requestPopulated).processMessage(requestPopulated); }); } private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) { return populateHeaders(request, RequestVerb.GET) .flatMap(requestPopulated -> { if (requestPopulated.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) { documentClientRetryPolicy.updateEndTime(); requestPopulated.requestContext.updateRetryContext(documentClientRetryPolicy, true); } return getStoreProxy(requestPopulated).processMessage(requestPopulated); }); } Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) { return populateHeaders(request, RequestVerb.GET) .flatMap(gatewayProxy::processMessage); } private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) { return populateHeaders(request, RequestVerb.POST) .flatMap(requestPopulated -> this.getStoreProxy(requestPopulated).processMessage(requestPopulated) .map(response -> { this.captureSessionToken(requestPopulated, response); return response; } )); } @Override public Mono<ResourceResponse<DocumentCollection>> readCollection(String collectionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<DocumentCollection>> readCollectionInternal(String collectionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } logger.debug("Reading a Collection. 
collectionLink: [{}]", collectionLink); String path = Utils.joinPath(collectionLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.DocumentCollection, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class)); } catch (Exception e) { logger.debug("Failure in reading a collection, due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<DocumentCollection>> readCollections(String databaseLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } return readFeed(options, ResourceType.DocumentCollection, DocumentCollection.class, Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, String query, CosmosQueryRequestOptions options) { return createQuery(databaseLink, new SqlQuerySpec(query), options, DocumentCollection.class, ResourceType.DocumentCollection); } @Override public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(databaseLink, querySpec, options, DocumentCollection.class, ResourceType.DocumentCollection); } private static String serializeProcedureParams(List<Object> objectArray) { String[] stringArray = new String[objectArray.size()]; for (int i = 0; i < objectArray.size(); ++i) { Object object = objectArray.get(i); if (object instanceof JsonSerializable) { stringArray[i] = ModelBridgeInternal.toJsonFromJsonSerializable((JsonSerializable) object); } else { try { stringArray[i] = mapper.writeValueAsString(object); } catch (IOException e) { throw new IllegalArgumentException("Can't serialize the object into the json string", e); } } } return String.format("[%s]", StringUtils.join(stringArray, ",")); } private static void validateResource(Resource resource) { if (!StringUtils.isEmpty(resource.getId())) { if (resource.getId().indexOf('/') != -1 || resource.getId().indexOf('\\') != -1 || resource.getId().indexOf('?') != -1 || resource.getId().indexOf(' throw new IllegalArgumentException("Id contains illegal chars."); } if (resource.getId().endsWith(" ")) { throw new IllegalArgumentException("Id ends with a space."); } } } private Map<String, String> getRequestHeaders(RequestOptions options, ResourceType resourceType, OperationType operationType) { Map<String, String> headers = new HashMap<>(); if (this.useMultipleWriteLocations) { headers.put(HttpConstants.HttpHeaders.ALLOW_TENTATIVE_WRITES, Boolean.TRUE.toString()); } if (consistencyLevel != null) { headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, consistencyLevel.toString()); } if (options == null) { if (!this.contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) { headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL); } return headers; } Map<String, String> customOptions = options.getHeaders(); if (customOptions != null) { headers.putAll(customOptions); } boolean contentResponseOnWriteEnabled = this.contentResponseOnWriteEnabled; if 
(options.isContentResponseOnWriteEnabled() != null) { contentResponseOnWriteEnabled = options.isContentResponseOnWriteEnabled(); } if (!contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) { headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL); } if (options.getIfMatchETag() != null) { headers.put(HttpConstants.HttpHeaders.IF_MATCH, options.getIfMatchETag()); } if(options.getIfNoneMatchETag() != null) { headers.put(HttpConstants.HttpHeaders.IF_NONE_MATCH, options.getIfNoneMatchETag()); } if (options.getConsistencyLevel() != null) { headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, options.getConsistencyLevel().toString()); } if (options.getIndexingDirective() != null) { headers.put(HttpConstants.HttpHeaders.INDEXING_DIRECTIVE, options.getIndexingDirective().toString()); } if (options.getPostTriggerInclude() != null && options.getPostTriggerInclude().size() > 0) { String postTriggerInclude = StringUtils.join(options.getPostTriggerInclude(), ","); headers.put(HttpConstants.HttpHeaders.POST_TRIGGER_INCLUDE, postTriggerInclude); } if (options.getPreTriggerInclude() != null && options.getPreTriggerInclude().size() > 0) { String preTriggerInclude = StringUtils.join(options.getPreTriggerInclude(), ","); headers.put(HttpConstants.HttpHeaders.PRE_TRIGGER_INCLUDE, preTriggerInclude); } if (!Strings.isNullOrEmpty(options.getSessionToken())) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, options.getSessionToken()); } if (options.getResourceTokenExpirySeconds() != null) { headers.put(HttpConstants.HttpHeaders.RESOURCE_TOKEN_EXPIRY, String.valueOf(options.getResourceTokenExpirySeconds())); } if (options.getOfferThroughput() != null && options.getOfferThroughput() >= 0) { headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, options.getOfferThroughput().toString()); } else if (options.getOfferType() != null) { headers.put(HttpConstants.HttpHeaders.OFFER_TYPE, options.getOfferType()); } if (options.getOfferThroughput() == null) { if (options.getThroughputProperties() != null) { Offer offer = ModelBridgeInternal.getOfferFromThroughputProperties(options.getThroughputProperties()); final OfferAutoscaleSettings offerAutoscaleSettings = offer.getOfferAutoScaleSettings(); OfferAutoscaleAutoUpgradeProperties autoscaleAutoUpgradeProperties = null; if (offerAutoscaleSettings != null) { autoscaleAutoUpgradeProperties = offer.getOfferAutoScaleSettings().getAutoscaleAutoUpgradeProperties(); } if (offer.hasOfferThroughput() && (offerAutoscaleSettings != null && offerAutoscaleSettings.getMaxThroughput() >= 0 || autoscaleAutoUpgradeProperties != null && autoscaleAutoUpgradeProperties .getAutoscaleThroughputProperties() .getIncrementPercent() >= 0)) { throw new IllegalArgumentException("Autoscale provisioned throughput can not be configured with " + "fixed offer"); } if (offer.hasOfferThroughput()) { headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, String.valueOf(offer.getThroughput())); } else if (offer.getOfferAutoScaleSettings() != null) { headers.put(HttpConstants.HttpHeaders.OFFER_AUTOPILOT_SETTINGS, ModelBridgeInternal.toJsonFromJsonSerializable(offer.getOfferAutoScaleSettings())); } } } if (options.isQuotaInfoEnabled()) { headers.put(HttpConstants.HttpHeaders.POPULATE_QUOTA_INFO, String.valueOf(true)); } if (options.isScriptLoggingEnabled()) { headers.put(HttpConstants.HttpHeaders.SCRIPT_ENABLE_LOGGING, String.valueOf(true)); } return headers; } private Mono<RxDocumentServiceRequest> 
addPartitionKeyInformation(RxDocumentServiceRequest request, ByteBuffer contentAsByteBuffer, Document document, RequestOptions options) { Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); return collectionObs .map(collectionValueHolder -> { addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionValueHolder.v); return request; }); } private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request, ByteBuffer contentAsByteBuffer, Object document, RequestOptions options, Mono<Utils.ValueHolder<DocumentCollection>> collectionObs) { return collectionObs.map(collectionValueHolder -> { addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionValueHolder.v); return request; }); } private void addPartitionKeyInformation(RxDocumentServiceRequest request, ByteBuffer contentAsByteBuffer, Object objectDoc, RequestOptions options, DocumentCollection collection) { PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey(); PartitionKeyInternal partitionKeyInternal = null; if (options != null && options.getPartitionKey() != null && options.getPartitionKey().equals(PartitionKey.NONE)){ partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition); } else if (options != null && options.getPartitionKey() != null) { partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(options.getPartitionKey()); } else if (partitionKeyDefinition == null || partitionKeyDefinition.getPaths().size() == 0) { partitionKeyInternal = PartitionKeyInternal.getEmpty(); } else if (contentAsByteBuffer != null || objectDoc != null) { InternalObjectNode internalObjectNode; if (objectDoc instanceof InternalObjectNode) { internalObjectNode = (InternalObjectNode) objectDoc; } else if (contentAsByteBuffer != null) { contentAsByteBuffer.rewind(); internalObjectNode = new InternalObjectNode(contentAsByteBuffer); } else { throw new IllegalStateException("ContentAsByteBuffer and objectDoc are null"); } Instant serializationStartTime = Instant.now(); partitionKeyInternal = extractPartitionKeyValueFromDocument(internalObjectNode, partitionKeyDefinition); Instant serializationEndTime = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTime, serializationEndTime, SerializationDiagnosticsContext.SerializationType.PARTITION_KEY_FETCH_SERIALIZATION ); SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } } else { throw new UnsupportedOperationException("PartitionKey value must be supplied for this operation."); } request.setPartitionKeyInternal(partitionKeyInternal); request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson())); } private static PartitionKeyInternal extractPartitionKeyValueFromDocument( InternalObjectNode document, PartitionKeyDefinition partitionKeyDefinition) { if (partitionKeyDefinition != null) { String path = partitionKeyDefinition.getPaths().iterator().next(); List<String> parts = PathParser.getPathParts(path); if 
(parts.size() >= 1) { Object value = ModelBridgeInternal.getObjectByPathFromJsonSerializable(document, parts); if (value == null || value.getClass() == ObjectNode.class) { value = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition); } if (value instanceof PartitionKeyInternal) { return (PartitionKeyInternal) value; } else { return PartitionKeyInternal.fromObjectArray(Collections.singletonList(value), false); } } } return null; } private Mono<RxDocumentServiceRequest> getCreateDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy, String documentCollectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration, OperationType operationType) { if (StringUtils.isEmpty(documentCollectionLink)) { throw new IllegalArgumentException("documentCollectionLink"); } if (document == null) { throw new IllegalArgumentException("document"); } Instant serializationStartTimeUTC = Instant.now(); ByteBuffer content = BridgeInternal.serializeJsonToByteBuffer(document, mapper); Instant serializationEndTimeUTC = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTimeUTC, SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION); String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, operationType); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, operationType, ResourceType.Document, path, requestHeaders, options, content); if (requestRetryPolicy != null) { requestRetryPolicy.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); return addPartitionKeyInformation(request, content, document, options, collectionObs); } private Mono<RxDocumentServiceRequest> getBatchDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy, String documentCollectionLink, ServerBatchRequest serverBatchRequest, RequestOptions options, boolean disableAutomaticIdGeneration) { checkArgument(StringUtils.isNotEmpty(documentCollectionLink), "expected non empty documentCollectionLink"); checkNotNull(serverBatchRequest, "expected non null serverBatchRequest"); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer content = ByteBuffer.wrap(Utils.getUTF8Bytes(serverBatchRequest.getRequestBody())); Instant serializationEndTimeUTC = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTimeUTC, SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION); String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Batch); RxDocumentServiceRequest request = RxDocumentServiceRequest.create( this, OperationType.Batch, ResourceType.Document, 
path, requestHeaders, options, content); if (requestRetryPolicy != null) { requestRetryPolicy.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); return collectionObs.map((Utils.ValueHolder<DocumentCollection> collectionValueHolder) -> { addBatchHeaders(request, serverBatchRequest, collectionValueHolder.v); return request; }); } private RxDocumentServiceRequest addBatchHeaders(RxDocumentServiceRequest request, ServerBatchRequest serverBatchRequest, DocumentCollection collection) { if(serverBatchRequest instanceof SinglePartitionKeyServerBatchRequest) { PartitionKey partitionKey = ((SinglePartitionKeyServerBatchRequest) serverBatchRequest).getPartitionKeyValue(); PartitionKeyInternal partitionKeyInternal; if (partitionKey.equals(PartitionKey.NONE)) { PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey(); partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition); } else { partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(partitionKey); } request.setPartitionKeyInternal(partitionKeyInternal); request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson())); } else if(serverBatchRequest instanceof PartitionKeyRangeServerBatchRequest) { request.setPartitionKeyRangeIdentity(new PartitionKeyRangeIdentity(((PartitionKeyRangeServerBatchRequest) serverBatchRequest).getPartitionKeyRangeId())); } else { throw new UnsupportedOperationException("Unknown Server request."); } request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_REQUEST, Boolean.TRUE.toString()); request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_ATOMIC, String.valueOf(serverBatchRequest.isAtomicBatch())); request.getHeaders().put(HttpConstants.HttpHeaders.SHOULD_BATCH_CONTINUE_ON_ERROR, String.valueOf(serverBatchRequest.isShouldContinueOnError())); return request; } private boolean requiresFeedRangeFiltering(RxDocumentServiceRequest request) { if (request.getResourceType() != ResourceType.Document) { return false; } switch (request.getOperationType()) { case ReadFeed: return request.getFeedRange() != null; default: return false; } } @Override public Mono<RxDocumentServiceRequest> populateAuthorizationHeader(RxDocumentServiceRequest request) { if (request == null) { throw new IllegalArgumentException("request"); } if (this.authorizationTokenType == AuthorizationTokenType.AadToken) { return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache) .map(authorization -> { request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization); return request; }); } else { return Mono.just(request); } } @Override public Mono<HttpHeaders> populateAuthorizationHeader(HttpHeaders httpHeaders) { if (httpHeaders == null) { throw new IllegalArgumentException("httpHeaders"); } if (this.authorizationTokenType == AuthorizationTokenType.AadToken) { return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache) .map(authorization -> { httpHeaders.set(HttpConstants.HttpHeaders.AUTHORIZATION, authorization); 
return httpHeaders; }); } return Mono.just(httpHeaders); } @Override public AuthorizationTokenType getAuthorizationTokenType() { return this.authorizationTokenType; } @Override public String getUserAuthorizationToken(String resourceName, ResourceType resourceType, RequestVerb requestVerb, Map<String, String> headers, AuthorizationTokenType tokenType, Map<String, Object> properties) { if (this.cosmosAuthorizationTokenResolver != null) { return this.cosmosAuthorizationTokenResolver.getAuthorizationToken(requestVerb, resourceName, this.resolveCosmosResourceType(resourceType), properties != null ? Collections.unmodifiableMap(properties) : null); } else if (credential != null) { return this.authorizationTokenProvider.generateKeyAuthorizationSignature(requestVerb, resourceName, resourceType, headers); } else if (masterKeyOrResourceToken != null && hasAuthKeyResourceToken && resourceTokensMap == null) { return masterKeyOrResourceToken; } else { assert resourceTokensMap != null; if(resourceType.equals(ResourceType.DatabaseAccount)) { return this.firstResourceTokenFromPermissionFeed; } return ResourceTokenAuthorizationHelper.getAuthorizationTokenUsingResourceTokens(resourceTokensMap, requestVerb, resourceName, headers); } } private CosmosResourceType resolveCosmosResourceType(ResourceType resourceType) { CosmosResourceType cosmosResourceType = ModelBridgeInternal.fromServiceSerializedFormat(resourceType.toString()); if (cosmosResourceType == null) { return CosmosResourceType.SYSTEM; } return cosmosResourceType; } void captureSessionToken(RxDocumentServiceRequest request, RxDocumentServiceResponse response) { this.sessionContainer.setSessionToken(request, response.getResponseHeaders()); } private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request, DocumentClientRetryPolicy retryPolicy) { return populateHeaders(request, RequestVerb.POST) .flatMap(requestPopulated -> { RxStoreModel storeProxy = this.getStoreProxy(requestPopulated); if (requestPopulated.requestContext != null && retryPolicy.getRetryCount() > 0) { retryPolicy.updateEndTime(); requestPopulated.requestContext.updateRetryContext(retryPolicy, true); } return storeProxy.processMessage(requestPopulated); }); } private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) { return populateHeaders(request, RequestVerb.POST) .flatMap(requestPopulated -> { Map<String, String> headers = requestPopulated.getHeaders(); assert (headers != null); headers.put(HttpConstants.HttpHeaders.IS_UPSERT, "true"); if (requestPopulated.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) { documentClientRetryPolicy.updateEndTime(); requestPopulated.requestContext.updateRetryContext(documentClientRetryPolicy, true); } return getStoreProxy(requestPopulated).processMessage(requestPopulated) .map(response -> { this.captureSessionToken(requestPopulated, response); return response; } ); }); } private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) { return populateHeaders(request, RequestVerb.PUT) .flatMap(requestPopulated -> { if (requestPopulated.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) { documentClientRetryPolicy.updateEndTime(); requestPopulated.requestContext.updateRetryContext(documentClientRetryPolicy, true); } return getStoreProxy(requestPopulated).processMessage(requestPopulated); }); } private Mono<RxDocumentServiceResponse> 
patch(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) { return populateHeaders(request, RequestVerb.PATCH) .flatMap(requestPopulated -> { if (requestPopulated.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) { documentClientRetryPolicy.updateEndTime(); requestPopulated.requestContext.updateRetryContext(documentClientRetryPolicy, true); } return getStoreProxy(requestPopulated).processMessage(requestPopulated); }); } @Override public Mono<ResourceResponse<Document>> createDocument(String collectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); if (options == null || options.getPartitionKey() == null) { requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options); } DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy; return ObservableHelper.inlineIfPossibleAsObs(() -> createDocumentInternal(collectionLink, document, options, disableAutomaticIdGeneration, finalRetryPolicyInstance), requestRetryPolicy); } private Mono<ResourceResponse<Document>> createDocumentInternal(String collectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy requestRetryPolicy) { try { logger.debug("Creating a Document. collectionLink: [{}]", collectionLink); Mono<RxDocumentServiceRequest> requestObs = getCreateDocumentRequest(requestRetryPolicy, collectionLink, document, options, disableAutomaticIdGeneration, OperationType.Create); Mono<RxDocumentServiceResponse> responseObservable = requestObs.flatMap(request -> create(request, requestRetryPolicy)); return responseObservable .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)); } catch (Exception e) { logger.debug("Failure in creating a document due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Document>> upsertDocument(String collectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); if (options == null || options.getPartitionKey() == null) { requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options); } DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy; return ObservableHelper.inlineIfPossibleAsObs(() -> upsertDocumentInternal(collectionLink, document, options, disableAutomaticIdGeneration, finalRetryPolicyInstance), finalRetryPolicyInstance); } private Mono<ResourceResponse<Document>> upsertDocumentInternal(String collectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a Document.
collectionLink: [{}]", collectionLink); Mono<RxDocumentServiceRequest> reqObs = getCreateDocumentRequest(retryPolicyInstance, collectionLink, document, options, disableAutomaticIdGeneration, OperationType.Upsert); Mono<RxDocumentServiceResponse> responseObservable = reqObs.flatMap(request -> upsert(request, retryPolicyInstance)); return responseObservable .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)); } catch (Exception e) { logger.debug("Failure in upserting a document due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Document>> replaceDocument(String documentLink, Object document, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); if (options == null || options.getPartitionKey() == null) { String collectionLink = Utils.getCollectionName(documentLink); requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options); } DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy; return ObservableHelper.inlineIfPossibleAsObs(() -> replaceDocumentInternal(documentLink, document, options, finalRequestRetryPolicy), requestRetryPolicy); } private Mono<ResourceResponse<Document>> replaceDocumentInternal(String documentLink, Object document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(documentLink)) { throw new IllegalArgumentException("documentLink"); } if (document == null) { throw new IllegalArgumentException("document"); } Document typedDocument = documentFromObject(document, mapper); return this.replaceDocumentInternal(documentLink, typedDocument, options, retryPolicyInstance); } catch (Exception e) { logger.debug("Failure in replacing a document due to [{}]", e.getMessage()); return Mono.error(e); } } @Override public Mono<ResourceResponse<Document>> replaceDocument(Document document, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); if (options == null || options.getPartitionKey() == null) { String collectionLink = document.getSelfLink(); requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options); } DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy; return ObservableHelper.inlineIfPossibleAsObs(() -> replaceDocumentInternal(document, options, finalRequestRetryPolicy), requestRetryPolicy); } private Mono<ResourceResponse<Document>> replaceDocumentInternal(Document document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (document == null) { throw new IllegalArgumentException("document"); } return this.replaceDocumentInternal(document.getSelfLink(), document, options, retryPolicyInstance); } catch (Exception e) { logger.debug("Failure in replacing a database due to [{}]", e.getMessage()); return Mono.error(e); } } private Mono<ResourceResponse<Document>> replaceDocumentInternal(String documentLink, Document document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { if (document == null) { throw new IllegalArgumentException("document"); } logger.debug("Replacing a Document. 
documentLink: [{}]", documentLink); final String path = Utils.joinPath(documentLink, null); final Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Document, OperationType.Replace); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer content = serializeJsonToByteBuffer(document); Instant serializationEndTime = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTime, SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION); final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.Document, path, requestHeaders, options, content); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, content, document, options, collectionObs); return requestObs.flatMap(req -> replace(request, retryPolicyInstance) .map(resp -> toResourceResponse(resp, Document.class))); } @Override public Mono<ResourceResponse<Document>> patchDocument(String documentLink, CosmosPatchOperations cosmosPatchOperations, RequestOptions options) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> patchDocumentInternal(documentLink, cosmosPatchOperations, options, documentClientRetryPolicy), documentClientRetryPolicy); } private Mono<ResourceResponse<Document>> patchDocumentInternal(String documentLink, CosmosPatchOperations cosmosPatchOperations, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { checkArgument(StringUtils.isNotEmpty(documentLink), "expected non empty documentLink"); checkNotNull(cosmosPatchOperations, "expected non null cosmosPatchOperations"); logger.debug("Running patch operations on Document. 
documentLink: [{}]", documentLink); final String path = Utils.joinPath(documentLink, null); final Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Document, OperationType.Patch); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer content = ByteBuffer.wrap(PatchUtil.serializeCosmosPatchToByteArray(cosmosPatchOperations)); Instant serializationEndTime = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTime, SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION); final RxDocumentServiceRequest request = RxDocumentServiceRequest.create( this, OperationType.Patch, ResourceType.Document, path, requestHeaders, options, content); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync( BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation( request, null, null, options, collectionObs); return requestObs.flatMap(req -> patch(request, retryPolicyInstance) .map(resp -> toResourceResponse(resp, Document.class))); } @Override public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDocumentInternal(documentLink, null, options, requestRetryPolicy), requestRetryPolicy); } @Override public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, InternalObjectNode internalObjectNode, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDocumentInternal(documentLink, internalObjectNode, options, requestRetryPolicy), requestRetryPolicy); } private Mono<ResourceResponse<Document>> deleteDocumentInternal(String documentLink, InternalObjectNode internalObjectNode, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(documentLink)) { throw new IllegalArgumentException("documentLink"); } logger.debug("Deleting a Document. 
documentLink: [{}]", documentLink); String path = Utils.joinPath(documentLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.Document, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, internalObjectNode, options, collectionObs); return requestObs.flatMap(req -> this .delete(req, retryPolicyInstance) .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class))); } catch (Exception e) { logger.debug("Failure in deleting a document due to [{}]", e.getMessage()); return Mono.error(e); } } @Override public Mono<ResourceResponse<Document>> readDocument(String documentLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readDocumentInternal(documentLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Document>> readDocumentInternal(String documentLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(documentLink)) { throw new IllegalArgumentException("documentLink"); } logger.debug("Reading a Document. documentLink: [{}]", documentLink); String path = Utils.joinPath(documentLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Document, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs); return requestObs.flatMap(req -> { return this.read(request, retryPolicyInstance).map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)); }); } catch (Exception e) { logger.debug("Failure in reading a document due to [{}]", e.getMessage()); return Mono.error(e); } } @Override public Flux<FeedResponse<Document>> readDocuments(String collectionLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return queryDocuments(collectionLink, "SELECT * FROM r", options); } @Override public <T> Mono<FeedResponse<T>> readMany( List<CosmosItemIdentity> itemIdentityList, String collectionLink, CosmosQueryRequestOptions options, Class<T> klass) { String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Query, ResourceType.Document, collectionLink, null ); Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null, request); return 
collectionObs.flatMap(documentCollectionResourceResponse -> { final DocumentCollection collection = documentCollectionResourceResponse.v; if (collection == null) { throw new IllegalStateException("Collection cannot be null"); } final PartitionKeyDefinition pkDefinition = collection.getPartitionKey(); Mono<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = partitionKeyRangeCache .tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), collection.getResourceId(), null, null); return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> { Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap = new HashMap<>(); CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v; if (routingMap == null) { throw new IllegalStateException("Failed to get routing map."); } itemIdentityList.forEach(itemIdentity -> { String effectivePartitionKeyString = PartitionKeyInternalHelper .getEffectivePartitionKeyString( BridgeInternal.getPartitionKeyInternal( itemIdentity.getPartitionKey()), pkDefinition); PartitionKeyRange range = routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString); partitionRangeItemKeyMap.computeIfAbsent(range, ignored -> new ArrayList<>()).add(itemIdentity); }); Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap = getRangeQueryMap(partitionRangeItemKeyMap, collection.getPartitionKey()); return createReadManyQuery( resourceLink, new SqlQuerySpec(DUMMY_SQL_QUERY), options, Document.class, ResourceType.Document, collection, Collections.unmodifiableMap(rangeQueryMap)) .collectList() .map(feedList -> { List<T> finalList = new ArrayList<>(); HashMap<String, String> headers = new HashMap<>(); ConcurrentMap<String, QueryMetrics> aggregatedQueryMetrics = new ConcurrentHashMap<>(); double requestCharge = 0; for (FeedResponse<Document> page : feedList) { ConcurrentMap<String, QueryMetrics> pageQueryMetrics = ModelBridgeInternal.queryMetrics(page); if (pageQueryMetrics != null) { pageQueryMetrics.forEach( aggregatedQueryMetrics::putIfAbsent); } requestCharge += page.getRequestCharge(); finalList.addAll(page.getResults().stream().map(document -> ModelBridgeInternal.toObjectFromJsonSerializable(document, klass)).collect(Collectors.toList())); } headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE, Double.toString(requestCharge)); FeedResponse<T> frp = BridgeInternal .createFeedResponse(finalList, headers); return frp; }); }); } ); } private Map<PartitionKeyRange, SqlQuerySpec> getRangeQueryMap( Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap, PartitionKeyDefinition partitionKeyDefinition) { Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap = new HashMap<>(); String partitionKeySelector = createPkSelector(partitionKeyDefinition); for (Map.Entry<PartitionKeyRange, List<CosmosItemIdentity>> entry : partitionRangeItemKeyMap.entrySet()) { SqlQuerySpec sqlQuerySpec; if (partitionKeySelector.equals("[\"id\"]")) { sqlQuerySpec = createReadManyQuerySpecPartitionKeyIdSame(entry.getValue(), partitionKeySelector); } else { sqlQuerySpec = createReadManyQuerySpec(entry.getValue(), partitionKeySelector); } rangeQueryMap.put(entry.getKey(), sqlQuerySpec); } return rangeQueryMap; } private SqlQuerySpec createReadManyQuerySpecPartitionKeyIdSame(
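// Special-cased spec for collections whose partition key path is /id: emits "SELECT * FROM c WHERE c.id IN (...)". Pairs whose id and partition key values differ are skipped; the dispatch in getRangeQueryMap assumes the two always match on this branch.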
List<CosmosItemIdentity> idPartitionKeyPairList, String partitionKeySelector) { StringBuilder queryStringBuilder = new StringBuilder(); List<SqlParameter> parameters = new ArrayList<>(); queryStringBuilder.append("SELECT * FROM c WHERE c.id IN ( "); for (int i = 0; i < idPartitionKeyPairList.size(); i++) { CosmosItemIdentity itemIdentity = idPartitionKeyPairList.get(i); String idValue = itemIdentity.getId(); String idParamName = "@param" + i; PartitionKey pkValueAsPartitionKey = itemIdentity.getPartitionKey(); Object pkValue = ModelBridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey); if (!Objects.equals(idValue, pkValue)) { continue; } parameters.add(new SqlParameter(idParamName, idValue)); queryStringBuilder.append(idParamName); if (i < idPartitionKeyPairList.size() - 1) { queryStringBuilder.append(", "); } } queryStringBuilder.append(" )"); return new SqlQuerySpec(queryStringBuilder.toString(), parameters); } private SqlQuerySpec createReadManyQuerySpec(List<CosmosItemIdentity> itemIdentities, String partitionKeySelector) { StringBuilder queryStringBuilder = new StringBuilder(); List<SqlParameter> parameters = new ArrayList<>(); queryStringBuilder.append("SELECT * FROM c WHERE ( "); for (int i = 0; i < itemIdentities.size(); i++) { CosmosItemIdentity itemIdentity = itemIdentities.get(i); PartitionKey pkValueAsPartitionKey = itemIdentity.getPartitionKey(); Object pkValue = ModelBridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey); String pkParamName = "@param" + (2 * i); parameters.add(new SqlParameter(pkParamName, pkValue)); String idValue = itemIdentity.getId(); String idParamName = "@param" + (2 * i + 1); parameters.add(new SqlParameter(idParamName, idValue)); queryStringBuilder.append("("); queryStringBuilder.append("c.id = "); queryStringBuilder.append(idParamName); queryStringBuilder.append(" AND "); queryStringBuilder.append(" c"); queryStringBuilder.append(partitionKeySelector); queryStringBuilder.append((" = ")); queryStringBuilder.append(pkParamName); queryStringBuilder.append(" )"); if (i < itemIdentities.size() - 1) { queryStringBuilder.append(" OR "); } } queryStringBuilder.append(" )"); return new SqlQuerySpec(queryStringBuilder.toString(), parameters); } private String createPkSelector(PartitionKeyDefinition partitionKeyDefinition) { return partitionKeyDefinition.getPaths() .stream() .map(pathPart -> StringUtils.substring(pathPart, 1)) .map(pathPart -> StringUtils.replace(pathPart, "\"", "\\")) .map(part -> "[\"" + part + "\"]") .collect(Collectors.joining()); } private <T extends Resource> Flux<FeedResponse<T>> createReadManyQuery( String parentResourceLink, SqlQuerySpec sqlQuery, CosmosQueryRequestOptions options, Class<T> klass, ResourceType resourceTypeEnum, DocumentCollection collection, Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap) { UUID activityId = Utils.randomUUID(); IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this); Flux<? 
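// Hands the per-range query specs to DocumentQueryExecutionContextFactory so the read-many fan-out reuses the regular query execution stack.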
extends IDocumentQueryExecutionContext<T>> executionContext = DocumentQueryExecutionContextFactory.createReadManyQueryAsync(this, queryClient, collection.getResourceId(), sqlQuery, rangeQueryMap, options, collection.getResourceId(), parentResourceLink, activityId, klass, resourceTypeEnum); return executionContext.flatMap(IDocumentQueryExecutionContext<T>::executeAsync); } @Override public Flux<FeedResponse<Document>> queryDocuments(String collectionLink, String query, CosmosQueryRequestOptions options) { return queryDocuments(collectionLink, new SqlQuerySpec(query), options); } private IDocumentQueryClient documentQueryClientImpl(RxDocumentClientImpl rxDocumentClientImpl) { return new IDocumentQueryClient () { @Override public RxCollectionCache getCollectionCache() { return RxDocumentClientImpl.this.collectionCache; } @Override public RxPartitionKeyRangeCache getPartitionKeyRangeCache() { return RxDocumentClientImpl.this.partitionKeyRangeCache; } @Override public IRetryPolicyFactory getResetSessionTokenRetryPolicy() { return RxDocumentClientImpl.this.resetSessionTokenRetryPolicy; } @Override public ConsistencyLevel getDefaultConsistencyLevelAsync() { return RxDocumentClientImpl.this.gatewayConfigurationReader.getDefaultConsistencyLevel(); } @Override public ConsistencyLevel getDesiredConsistencyLevelAsync() { return RxDocumentClientImpl.this.consistencyLevel; } @Override public Mono<RxDocumentServiceResponse> executeQueryAsync(RxDocumentServiceRequest request) { return RxDocumentClientImpl.this.query(request).single(); } @Override public QueryCompatibilityMode getQueryCompatibilityMode() { return QueryCompatibilityMode.Default; } @Override public Mono<RxDocumentServiceResponse> readFeedAsync(RxDocumentServiceRequest request) { return null; } }; } @Override public Flux<FeedResponse<Document>> queryDocuments(String collectionLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(collectionLink, querySpec, options, Document.class, ResourceType.Document); } @Override public Flux<FeedResponse<Document>> queryDocumentChangeFeed( final DocumentCollection collection, final CosmosChangeFeedRequestOptions changeFeedOptions) { checkNotNull(collection, "Argument 'collection' must not be null."); ChangeFeedQueryImpl<Document> changeFeedQueryImpl = new ChangeFeedQueryImpl<>( this, ResourceType.Document, Document.class, collection.getSelfLink(), collection.getResourceId(), changeFeedOptions); return changeFeedQueryImpl.executeAsync(); } @Override public Flux<FeedResponse<Document>> readAllDocuments( String collectionLink, PartitionKey partitionKey, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } if (partitionKey == null) { throw new IllegalArgumentException("partitionKey"); } RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Query, ResourceType.Document, collectionLink, null ); Flux<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null, request).flux(); return collectionObs.flatMap(documentCollectionResourceResponse -> { DocumentCollection collection = documentCollectionResourceResponse.v; if (collection == null) { throw new IllegalStateException("Collection cannot be null"); } PartitionKeyDefinition pkDefinition = collection.getPartitionKey(); String pkSelector = createPkSelector(pkDefinition); SqlQuerySpec querySpec = createLogicalPartitionScanQuerySpec(partitionKey, pkSelector); String resourceLink = 
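// Pin the scan to one logical partition: compute the effective partition key string, resolve the owning range via the routing map, and run the query against that single range id.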
parentResourceLinkToQueryLink(collectionLink, ResourceType.Document); UUID activityId = Utils.randomUUID(); IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this); final CosmosQueryRequestOptions effectiveOptions = ModelBridgeInternal.createQueryRequestOptions(options); InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy( this.collectionCache, null, resourceLink, effectiveOptions); return ObservableHelper.fluxInlineIfPossibleAsObs( () -> { Flux<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = this.partitionKeyRangeCache .tryLookupAsync( BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), collection.getResourceId(), null, null).flux(); return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> { CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v; if (routingMap == null) { throw new IllegalStateException("Failed to get routing map."); } String effectivePartitionKeyString = PartitionKeyInternalHelper .getEffectivePartitionKeyString( BridgeInternal.getPartitionKeyInternal(partitionKey), pkDefinition); PartitionKeyRange range = routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString); return createQueryInternal( resourceLink, querySpec, ModelBridgeInternal.partitionKeyRangeIdInternal(effectiveOptions, range.getId()), Document.class, ResourceType.Document, queryClient, activityId); }); }, invalidPartitionExceptionRetryPolicy); }); } @Override public ConcurrentMap<String, PartitionedQueryExecutionInfo> getQueryPlanCache() { return queryPlanCache; } @Override public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(final String collectionLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return readFeed(options, ResourceType.PartitionKeyRange, PartitionKeyRange.class, Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT)); } private RxDocumentServiceRequest getStoredProcedureRequest(String collectionLink, StoredProcedure storedProcedure, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } if (storedProcedure == null) { throw new IllegalArgumentException("storedProcedure"); } validateResource(storedProcedure); String path = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, operationType); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, operationType, ResourceType.StoredProcedure, path, storedProcedure, requestHeaders, options); return request; } private RxDocumentServiceRequest getUserDefinedFunctionRequest(String collectionLink, UserDefinedFunction udf, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } if (udf == null) { throw new IllegalArgumentException("udf"); } validateResource(udf); String path = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, operationType); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, operationType, ResourceType.UserDefinedFunction, path, udf, requestHeaders, options); return 
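// The stored procedure and UDF request factories above only assemble the RxDocumentServiceRequest; retry decoration and dispatch happen in the create/upsert/replace callers.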
request; } @Override public Mono<ResourceResponse<StoredProcedure>> createStoredProcedure(String collectionLink, StoredProcedure storedProcedure, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createStoredProcedureInternal(collectionLink, storedProcedure, options, requestRetryPolicy), requestRetryPolicy); } private Mono<ResourceResponse<StoredProcedure>> createStoredProcedureInternal(String collectionLink, StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Creating a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]", collectionLink, storedProcedure.getId()); RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options, OperationType.Create); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in creating a StoredProcedure due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<StoredProcedure>> upsertStoredProcedure(String collectionLink, StoredProcedure storedProcedure, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertStoredProcedureInternal(collectionLink, storedProcedure, options, requestRetryPolicy), requestRetryPolicy); } private Mono<ResourceResponse<StoredProcedure>> upsertStoredProcedureInternal(String collectionLink, StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]", collectionLink, storedProcedure.getId()); RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options, OperationType.Upsert); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in upserting a StoredProcedure due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedure(StoredProcedure storedProcedure, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceStoredProcedureInternal(storedProcedure, options, requestRetryPolicy), requestRetryPolicy); } private Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedureInternal(StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (storedProcedure == null) { throw new IllegalArgumentException("storedProcedure"); } logger.debug("Replacing a StoredProcedure. 
storedProcedure id [{}]", storedProcedure.getId()); RxDocumentClientImpl.validateResource(storedProcedure); String path = Utils.joinPath(storedProcedure.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.StoredProcedure, path, storedProcedure, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in replacing a StoredProcedure due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedure(String storedProcedureLink, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteStoredProcedureInternal(storedProcedureLink, options, requestRetryPolicy), requestRetryPolicy); } private Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedureInternal(String storedProcedureLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(storedProcedureLink)) { throw new IllegalArgumentException("storedProcedureLink"); } logger.debug("Deleting a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink); String path = Utils.joinPath(storedProcedureLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.StoredProcedure, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in deleting a StoredProcedure due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<StoredProcedure>> readStoredProcedure(String storedProcedureLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readStoredProcedureInternal(storedProcedureLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<StoredProcedure>> readStoredProcedureInternal(String storedProcedureLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(storedProcedureLink)) { throw new IllegalArgumentException("storedProcedureLink"); } logger.debug("Reading a StoredProcedure. 
storedProcedureLink [{}]", storedProcedureLink); String path = Utils.joinPath(storedProcedureLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.StoredProcedure, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in reading a StoredProcedure due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<StoredProcedure>> readStoredProcedures(String collectionLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return readFeed(options, ResourceType.StoredProcedure, StoredProcedure.class, Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT)); } @Override public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, String query, CosmosQueryRequestOptions options) { return queryStoredProcedures(collectionLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(collectionLink, querySpec, options, StoredProcedure.class, ResourceType.StoredProcedure); } @Override public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink, List<Object> procedureParams) { return this.executeStoredProcedure(storedProcedureLink, null, procedureParams); } @Override public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink, RequestOptions options, List<Object> procedureParams) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> executeStoredProcedureInternal(storedProcedureLink, options, procedureParams, documentClientRetryPolicy), documentClientRetryPolicy); } @Override public Mono<TransactionalBatchResponse> executeBatchRequest(String collectionLink, ServerBatchRequest serverBatchRequest, RequestOptions options, boolean disableAutomaticIdGeneration) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> executeBatchRequestInternal(collectionLink, serverBatchRequest, options, documentClientRetryPolicy, disableAutomaticIdGeneration), documentClientRetryPolicy); } private Mono<StoredProcedureResponse> executeStoredProcedureInternal(String storedProcedureLink, RequestOptions options, List<Object> procedureParams, DocumentClientRetryPolicy retryPolicy) { try { logger.debug("Executing a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink); String path = Utils.joinPath(storedProcedureLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.ExecuteJavaScript); requestHeaders.put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.ExecuteJavaScript, ResourceType.StoredProcedure, path, procedureParams != null && !procedureParams.isEmpty() ? 
RxDocumentClientImpl.serializeProcedureParams(procedureParams) : "", requestHeaders, options); if (retryPolicy != null) { retryPolicy.onBeforeSendRequest(request); } Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options); return reqObs.flatMap(req -> create(request, retryPolicy) .map(response -> { this.captureSessionToken(request, response); return toStoredProcedureResponse(response); })); } catch (Exception e) { logger.debug("Failure in executing a StoredProcedure due to [{}]", e.getMessage(), e); return Mono.error(e); } } private Mono<TransactionalBatchResponse> executeBatchRequestInternal(String collectionLink, ServerBatchRequest serverBatchRequest, RequestOptions options, DocumentClientRetryPolicy requestRetryPolicy, boolean disableAutomaticIdGeneration) { try { logger.debug("Executing a Batch request with number of operations {}", serverBatchRequest.getOperations().size()); Mono<RxDocumentServiceRequest> requestObs = getBatchDocumentRequest(requestRetryPolicy, collectionLink, serverBatchRequest, options, disableAutomaticIdGeneration); Mono<RxDocumentServiceResponse> responseObservable = requestObs.flatMap(request -> create(request, requestRetryPolicy)); return responseObservable .map(serviceResponse -> BatchResponseParser.fromDocumentServiceResponse(serviceResponse, serverBatchRequest, true)); } catch (Exception ex) { logger.debug("Failure in executing a batch due to [{}]", ex.getMessage(), ex); return Mono.error(ex); } } @Override public Mono<ResourceResponse<Trigger>> createTrigger(String collectionLink, Trigger trigger, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createTriggerInternal(collectionLink, trigger, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Trigger>> createTriggerInternal(String collectionLink, Trigger trigger, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Creating a Trigger. collectionLink [{}], trigger id [{}]", collectionLink, trigger.getId()); RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options, OperationType.Create); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in creating a Trigger due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Trigger>> upsertTrigger(String collectionLink, Trigger trigger, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertTriggerInternal(collectionLink, trigger, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Trigger>> upsertTriggerInternal(String collectionLink, Trigger trigger, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a Trigger. 
collectionLink [{}], trigger id [{}]", collectionLink, trigger.getId()); RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options, OperationType.Upsert); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in upserting a Trigger due to [{}]", e.getMessage(), e); return Mono.error(e); } } private RxDocumentServiceRequest getTriggerRequest(String collectionLink, Trigger trigger, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } if (trigger == null) { throw new IllegalArgumentException("trigger"); } RxDocumentClientImpl.validateResource(trigger); String path = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, operationType); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, operationType, ResourceType.Trigger, path, trigger, requestHeaders, options); return request; } @Override public Mono<ResourceResponse<Trigger>> replaceTrigger(Trigger trigger, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceTriggerInternal(trigger, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Trigger>> replaceTriggerInternal(Trigger trigger, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (trigger == null) { throw new IllegalArgumentException("trigger"); } logger.debug("Replacing a Trigger. trigger id [{}]", trigger.getId()); RxDocumentClientImpl.validateResource(trigger); String path = Utils.joinPath(trigger.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.Trigger, path, trigger, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in replacing a Trigger due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Trigger>> deleteTrigger(String triggerLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteTriggerInternal(triggerLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Trigger>> deleteTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(triggerLink)) { throw new IllegalArgumentException("triggerLink"); } logger.debug("Deleting a Trigger. 
triggerLink [{}]", triggerLink); String path = Utils.joinPath(triggerLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.Trigger, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in deleting a Trigger due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Trigger>> readTrigger(String triggerLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readTriggerInternal(triggerLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Trigger>> readTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(triggerLink)) { throw new IllegalArgumentException("triggerLink"); } logger.debug("Reading a Trigger. triggerLink [{}]", triggerLink); String path = Utils.joinPath(triggerLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Trigger, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in reading a Trigger due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<Trigger>> readTriggers(String collectionLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return readFeed(options, ResourceType.Trigger, Trigger.class, Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, String query, CosmosQueryRequestOptions options) { return queryTriggers(collectionLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(collectionLink, querySpec, options, Trigger.class, ResourceType.Trigger); } @Override public Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunction(String collectionLink, UserDefinedFunction udf, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunctionInternal(String collectionLink, UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Creating a UserDefinedFunction. 
collectionLink [{}], udf id [{}]", collectionLink, udf.getId()); RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options, OperationType.Create); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in creating a UserDefinedFunction due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunction(String collectionLink, UserDefinedFunction udf, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunctionInternal(String collectionLink, UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink, udf.getId()); RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options, OperationType.Upsert); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in upserting a UserDefinedFunction due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunction(UserDefinedFunction udf, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceUserDefinedFunctionInternal(udf, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunctionInternal(UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (udf == null) { throw new IllegalArgumentException("udf"); } logger.debug("Replacing a UserDefinedFunction. 
udf id [{}]", udf.getId()); validateResource(udf); String path = Utils.joinPath(udf.getSelfLink(), null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.UserDefinedFunction, path, udf, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in replacing a UserDefinedFunction due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunction(String udfLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunctionInternal(String udfLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(udfLink)) { throw new IllegalArgumentException("udfLink"); } logger.debug("Deleting a UserDefinedFunction. udfLink [{}]", udfLink); String path = Utils.joinPath(udfLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.UserDefinedFunction, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in deleting a UserDefinedFunction due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunction(String udfLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunctionInternal(String udfLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(udfLink)) { throw new IllegalArgumentException("udfLink"); } logger.debug("Reading a UserDefinedFunction. 
udfLink [{}]", udfLink); String path = Utils.joinPath(udfLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.UserDefinedFunction, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in reading a UserDefinedFunction due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<UserDefinedFunction>> readUserDefinedFunctions(String collectionLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return readFeed(options, ResourceType.UserDefinedFunction, UserDefinedFunction.class, Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink, String query, CosmosQueryRequestOptions options) { return queryUserDefinedFunctions(collectionLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(collectionLink, querySpec, options, UserDefinedFunction.class, ResourceType.UserDefinedFunction); } @Override public Mono<ResourceResponse<Conflict>> readConflict(String conflictLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Conflict>> readConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(conflictLink)) { throw new IllegalArgumentException("conflictLink"); } logger.debug("Reading a Conflict. 
conflictLink [{}]", conflictLink); String path = Utils.joinPath(conflictLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Conflict, path, requestHeaders, options); Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options); return reqObs.flatMap(req -> { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class)); }); } catch (Exception e) { logger.debug("Failure in reading a Conflict due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<Conflict>> readConflicts(String collectionLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return readFeed(options, ResourceType.Conflict, Conflict.class, Utils.joinPath(collectionLink, Paths.CONFLICTS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, String query, CosmosQueryRequestOptions options) { return queryConflicts(collectionLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(collectionLink, querySpec, options, Conflict.class, ResourceType.Conflict); } @Override public Mono<ResourceResponse<Conflict>> deleteConflict(String conflictLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Conflict>> deleteConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(conflictLink)) { throw new IllegalArgumentException("conflictLink"); } logger.debug("Deleting a Conflict. 
conflictLink [{}]", conflictLink); String path = Utils.joinPath(conflictLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.Conflict, path, requestHeaders, options); Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options); return reqObs.flatMap(req -> { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class)); }); } catch (Exception e) { logger.debug("Failure in deleting a Conflict due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<User>> createUser(String databaseLink, User user, RequestOptions options) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createUserInternal(databaseLink, user, options, documentClientRetryPolicy), documentClientRetryPolicy); } private Mono<ResourceResponse<User>> createUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Creating a User. databaseLink [{}], user id [{}]", databaseLink, user.getId()); RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Create); return this.create(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in creating a User due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<User>> upsertUser(String databaseLink, User user, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertUserInternal(databaseLink, user, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<User>> upsertUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a User. 
databaseLink [{}], user id [{}]", databaseLink, user.getId()); RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Upsert); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in upserting a User due to [{}]", e.getMessage(), e); return Mono.error(e); } } private RxDocumentServiceRequest getUserRequest(String databaseLink, User user, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } if (user == null) { throw new IllegalArgumentException("user"); } RxDocumentClientImpl.validateResource(user); String path = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, operationType); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, operationType, ResourceType.User, path, user, requestHeaders, options); return request; } @Override public Mono<ResourceResponse<User>> replaceUser(User user, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceUserInternal(user, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<User>> replaceUserInternal(User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (user == null) { throw new IllegalArgumentException("user"); } logger.debug("Replacing a User. user id [{}]", user.getId()); RxDocumentClientImpl.validateResource(user); String path = Utils.joinPath(user.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.User, path, user, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in replacing a User due to [{}]", e.getMessage(), e); return Mono.error(e); } } public Mono<ResourceResponse<User>> deleteUser(String userLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<User>> deleteUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } logger.debug("Deleting a User. 
userLink [{}]", userLink); String path = Utils.joinPath(userLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.User, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in deleting a User due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<User>> readUser(String userLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<User>> readUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } logger.debug("Reading a User. userLink [{}]", userLink); String path = Utils.joinPath(userLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.User, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in reading a User due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<User>> readUsers(String databaseLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } return readFeed(options, ResourceType.User, User.class, Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<User>> queryUsers(String databaseLink, String query, CosmosQueryRequestOptions options) { return queryUsers(databaseLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<User>> queryUsers(String databaseLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(databaseLink, querySpec, options, User.class, ResourceType.User); } @Override public Mono<ResourceResponse<Permission>> createPermission(String userLink, Permission permission, RequestOptions options) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createPermissionInternal(userLink, permission, options, documentClientRetryPolicy), this.resetSessionTokenRetryPolicy.getRequestPolicy()); } private Mono<ResourceResponse<Permission>> createPermissionInternal(String userLink, Permission permission, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Creating a Permission. 
userLink [{}], permission id [{}]", userLink, permission.getId()); RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options, OperationType.Create); return this.create(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in creating a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Permission>> upsertPermission(String userLink, Permission permission, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertPermissionInternal(userLink, permission, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Permission>> upsertPermissionInternal(String userLink, Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a Permission. userLink [{}], permission id [{}]", userLink, permission.getId()); RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options, OperationType.Upsert); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in upserting a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } private RxDocumentServiceRequest getPermissionRequest(String userLink, Permission permission, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } if (permission == null) { throw new IllegalArgumentException("permission"); } RxDocumentClientImpl.validateResource(permission); String path = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, operationType); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, operationType, ResourceType.Permission, path, permission, requestHeaders, options); return request; } @Override public Mono<ResourceResponse<Permission>> replacePermission(Permission permission, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replacePermissionInternal(permission, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Permission>> replacePermissionInternal(Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (permission == null) { throw new IllegalArgumentException("permission"); } logger.debug("Replacing a Permission. 
permission id [{}]", permission.getId()); RxDocumentClientImpl.validateResource(permission); String path = Utils.joinPath(permission.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.Permission, path, permission, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in replacing a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Permission>> deletePermission(String permissionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deletePermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Permission>> deletePermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(permissionLink)) { throw new IllegalArgumentException("permissionLink"); } logger.debug("Deleting a Permission. permissionLink [{}]", permissionLink); String path = Utils.joinPath(permissionLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.Permission, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in deleting a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Permission>> readPermission(String permissionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readPermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Permission>> readPermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance ) { try { if (StringUtils.isEmpty(permissionLink)) { throw new IllegalArgumentException("permissionLink"); } logger.debug("Reading a Permission. 
permissionLink [{}]", permissionLink); String path = Utils.joinPath(permissionLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Permission, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in reading a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<Permission>> readPermissions(String userLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } return readFeed(options, ResourceType.Permission, Permission.class, Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<Permission>> queryPermissions(String userLink, String query, CosmosQueryRequestOptions options) { return queryPermissions(userLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<Permission>> queryPermissions(String userLink, SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(userLink, querySpec, options, Permission.class, ResourceType.Permission); } @Override public Mono<ResourceResponse<Offer>> replaceOffer(Offer offer) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceOfferInternal(offer, documentClientRetryPolicy), documentClientRetryPolicy); } private Mono<ResourceResponse<Offer>> replaceOfferInternal(Offer offer, DocumentClientRetryPolicy documentClientRetryPolicy) { try { if (offer == null) { throw new IllegalArgumentException("offer"); } logger.debug("Replacing an Offer. offer id [{}]", offer.getId()); RxDocumentClientImpl.validateResource(offer); String path = Utils.joinPath(offer.getSelfLink(), null); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.Offer, path, offer, null, null); return this.replace(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Offer.class)); } catch (Exception e) { logger.debug("Failure in replacing an Offer due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Offer>> readOffer(String offerLink) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readOfferInternal(offerLink, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Offer>> readOfferInternal(String offerLink, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(offerLink)) { throw new IllegalArgumentException("offerLink"); } logger.debug("Reading an Offer. 
offerLink [{}]", offerLink); String path = Utils.joinPath(offerLink, null); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Offer, path, (HashMap<String, String>)null, null); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Offer.class)); } catch (Exception e) { logger.debug("Failure in reading an Offer due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<Offer>> readOffers(CosmosQueryRequestOptions options) { return readFeed(options, ResourceType.Offer, Offer.class, Utils.joinPath(Paths.OFFERS_PATH_SEGMENT, null)); } private <T extends Resource> Flux<FeedResponse<T>> readFeed(CosmosQueryRequestOptions options, ResourceType resourceType, Class<T> klass, String resourceLink) { if (options == null) { options = new CosmosQueryRequestOptions(); } Integer maxItemCount = ModelBridgeInternal.getMaxItemCountFromQueryRequestOptions(options); int maxPageSize = maxItemCount != null ? maxItemCount : -1; final CosmosQueryRequestOptions finalCosmosQueryRequestOptions = options; BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> { Map<String, String> requestHeaders = new HashMap<>(); if (continuationToken != null) { requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken); } requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize)); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.ReadFeed, resourceType, resourceLink, requestHeaders, finalCosmosQueryRequestOptions); return request; }; Function<RxDocumentServiceRequest, Mono<FeedResponse<T>>> executeFunc = request -> ObservableHelper .inlineIfPossibleAsObs(() -> readFeed(request).map(response -> toFeedResponsePage(response, klass)), this.resetSessionTokenRetryPolicy.getRequestPolicy()); return Paginator.getPaginatedQueryResultAsObservable(options, createRequestFunc, executeFunc, klass, maxPageSize); } @Override public Flux<FeedResponse<Offer>> queryOffers(String query, CosmosQueryRequestOptions options) { return queryOffers(new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<Offer>> queryOffers(SqlQuerySpec querySpec, CosmosQueryRequestOptions options) { return createQuery(null, querySpec, options, Offer.class, ResourceType.Offer); } @Override public Mono<DatabaseAccount> getDatabaseAccount() { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> getDatabaseAccountInternal(documentClientRetryPolicy), documentClientRetryPolicy); } @Override public DatabaseAccount getLatestDatabaseAccount() { return this.globalEndpointManager.getLatestDatabaseAccount(); } private Mono<DatabaseAccount> getDatabaseAccountInternal(DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Getting Database Account"); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.DatabaseAccount, "", (HashMap<String, String>) null, null); return this.read(request, documentClientRetryPolicy).map(ModelBridgeInternal::toDatabaseAccount); } catch (Exception e) { logger.debug("Failure in getting Database Account due to [{}]", e.getMessage(), e); return Mono.error(e); } } public Object getSession() { return this.sessionContainer; } public 
void setSession(Object sessionContainer) { this.sessionContainer = (SessionContainer) sessionContainer; } @Override public RxClientCollectionCache getCollectionCache() { return this.collectionCache; } @Override public RxPartitionKeyRangeCache getPartitionKeyRangeCache() { return partitionKeyRangeCache; } public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) { return Flux.defer(() -> { RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.DatabaseAccount, "", null, (Object) null); return this.populateHeaders(request, RequestVerb.GET) .flatMap(requestPopulated -> { requestPopulated.setEndpointOverride(endpoint); return this.gatewayProxy.processMessage(requestPopulated).doOnError(e -> { String message = String.format("Failed to retrieve database account information. %s", e.getCause() != null ? e.getCause().toString() : e.toString()); logger.warn(message); }).map(rsp -> rsp.getResource(DatabaseAccount.class)) .doOnNext(databaseAccount -> this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount)); }); }); } /** * Certain requests must be routed through gateway even when the client connectivity mode is direct. * * @param request * @return RxStoreModel */ private RxStoreModel getStoreProxy(RxDocumentServiceRequest request) { if (request.UseGatewayMode) { return this.gatewayProxy; } ResourceType resourceType = request.getResourceType(); OperationType operationType = request.getOperationType(); if (resourceType == ResourceType.Offer || resourceType.isScript() && operationType != OperationType.ExecuteJavaScript || resourceType == ResourceType.PartitionKeyRange) { return this.gatewayProxy; } if (operationType == OperationType.Create || operationType == OperationType.Upsert) { if (resourceType == ResourceType.Database || resourceType == ResourceType.User || resourceType == ResourceType.DocumentCollection || resourceType == ResourceType.Permission) { return this.gatewayProxy; } else { return this.storeModel; } } else if (operationType == OperationType.Delete) { if (resourceType == ResourceType.Database || resourceType == ResourceType.User || resourceType == ResourceType.DocumentCollection) { return this.gatewayProxy; } else { return this.storeModel; } } else if (operationType == OperationType.Replace) { if (resourceType == ResourceType.DocumentCollection) { return this.gatewayProxy; } else { return this.storeModel; } } else if (operationType == OperationType.Read) { if (resourceType == ResourceType.DocumentCollection) { return this.gatewayProxy; } else { return this.storeModel; } } else { if ((request.getOperationType() == OperationType.Query || request.getOperationType() == OperationType.SqlQuery) && Utils.isCollectionChild(request.getResourceType())) { if (request.getPartitionKeyRangeIdentity() == null && request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY) == null) { return this.gatewayProxy; } } return this.storeModel; } } @Override public void close() { logger.info("Attempting to close client {}", this.clientId); if (!closed.getAndSet(true)) { logger.info("Shutting down ..."); logger.info("Closing Global Endpoint Manager ..."); LifeCycleUtils.closeQuietly(this.globalEndpointManager); logger.info("Closing StoreClientFactory ..."); LifeCycleUtils.closeQuietly(this.storeClientFactory); logger.info("Shutting down reactorHttpClient ..."); LifeCycleUtils.closeQuietly(this.reactorHttpClient); logger.info("Shutting down 
CpuMonitor ..."); CpuMemoryMonitor.unregister(this); logger.info("Shutting down completed."); } else { logger.warn("Already shutdown!"); } } @Override public ItemDeserializer getItemDeserializer() { return this.itemDeserializer; } private static SqlQuerySpec createLogicalPartitionScanQuerySpec( PartitionKey partitionKey, String partitionKeySelector) { StringBuilder queryStringBuilder = new StringBuilder(); List<SqlParameter> parameters = new ArrayList<>(); queryStringBuilder.append("SELECT * FROM c WHERE"); Object pkValue = ModelBridgeInternal.getPartitionKeyObject(partitionKey); String pkParamName = "@pkValue"; parameters.add(new SqlParameter(pkParamName, pkValue)); queryStringBuilder.append(" c"); queryStringBuilder.append(partitionKeySelector); queryStringBuilder.append((" = ")); queryStringBuilder.append(pkParamName); return new SqlQuerySpec(queryStringBuilder.toString(), parameters); } @Override public Mono<List<FeedRange>> getFeedRanges(String collectionLink) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } RxDocumentServiceRequest request = RxDocumentServiceRequest.create( this, OperationType.Query, ResourceType.Document, collectionLink, null); Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null, request); return collectionObs.flatMap(documentCollectionResourceResponse -> { final DocumentCollection collection = documentCollectionResourceResponse.v; if (collection == null) { throw new IllegalStateException("Collection cannot be null"); } Mono<Utils.ValueHolder<List<PartitionKeyRange>>> valueHolderMono = partitionKeyRangeCache .tryGetOverlappingRangesAsync( BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), collection.getResourceId(), RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES, true, null); return valueHolderMono.map(RxDocumentClientImpl::toFeedRanges); }); } private static List<FeedRange> toFeedRanges( Utils.ValueHolder<List<PartitionKeyRange>> partitionKeyRangeListValueHolder) { final List<PartitionKeyRange> partitionKeyRangeList = partitionKeyRangeListValueHolder.v; if (partitionKeyRangeList == null) { throw new IllegalStateException("PartitionKeyRange list cannot be null"); } List<FeedRange> feedRanges = new ArrayList<>(); partitionKeyRangeList.forEach(pkRange -> feedRanges.add(toFeedRange(pkRange))); return feedRanges; } private static FeedRange toFeedRange(PartitionKeyRange pkRange) { return new FeedRangePartitionKeyRangeImpl(pkRange.getId()); } }
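Note the createLogicalPartitionScanQuerySpec helper near the end of the class above: it binds the partition-key value as a SqlParameter instead of splicing the value into the SQL text. Below is a minimal sketch of that pattern, reusing the same SqlQuerySpec/SqlParameter constructors the class itself calls; the demo class name, the ["pk"] selector value, and the main method are illustrative assumptions, not part of the SDK code above.

import com.azure.cosmos.models.SqlParameter;
import com.azure.cosmos.models.SqlQuerySpec;
import java.util.ArrayList;
import java.util.List;

public class PartitionScanQueryDemo {
    // Mirrors createLogicalPartitionScanQuerySpec: bind the partition-key value
    // as a parameter so the value is never concatenated into the SQL string.
    static SqlQuerySpec logicalPartitionScan(Object pkValue, String partitionKeySelector) {
        List<SqlParameter> parameters = new ArrayList<>();
        parameters.add(new SqlParameter("@pkValue", pkValue));
        String query = "SELECT * FROM c WHERE c" + partitionKeySelector + " = @pkValue";
        return new SqlQuerySpec(query, parameters);
    }

    public static void main(String[] args) {
        SqlQuerySpec spec = logicalPartitionScan("tenant-42", "[\"pk\"]");
        // Prints the parameterized query text, e.g. SELECT * FROM c WHERE c["pk"] = @pkValue
        System.out.println(spec.getQueryText());
    }
}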
👍 Good catch: `Thread.sleep(50);` is deleted, so the `InterruptedException` wouldn't be thrown.
private void seizeCapacity() { if (currentContext.isKeyOccupied()) { return; } RecordContext<R, K> storedContext = currentContext; try { while (inFlightRecordNum.get() > maxInFlightRecordNum) { if (!mailboxExecutor.tryYield()) { triggerIfNeeded(true); Thread.sleep(50); } } setCurrentContext(storedContext); inFlightRecordNum.incrementAndGet(); } catch (InterruptedException e) { throw new FlinkRuntimeException(e); } }
} catch (InterruptedException e) {
private void seizeCapacity() { if (currentContext.isKeyOccupied()) { return; } RecordContext<R, K> storedContext = currentContext; try { while (inFlightRecordNum.get() > maxInFlightRecordNum) { if (!mailboxExecutor.tryYield()) { triggerIfNeeded(true); Thread.sleep(1); } } } catch (InterruptedException ignored) { } setCurrentContext(storedContext); inFlightRecordNum.incrementAndGet(); }
class AsyncExecutionController<R, K> { private static final Logger LOG = LoggerFactory.getLogger(AsyncExecutionController.class); public static final int DEFAULT_BATCH_SIZE = 1000; public static final int DEFAULT_MAX_IN_FLIGHT_RECORD_NUM = 6000; /** * The batch size. When the number of state requests in the active buffer exceeds the batch * size, a batched state execution would be triggered. */ private final int batchSize; /** The max allow number of in-flight records. */ private final int maxInFlightRecordNum; /** * The mailbox executor borrowed from {@code StreamTask}. Keeping the reference of * mailboxExecutor here is to restrict the number of in-flight records, when the number of * in-flight records > {@link * blocked. */ private final MailboxExecutor mailboxExecutor; /** The key accounting unit which is used to detect the key conflict. */ final KeyAccountingUnit<R, K> keyAccountingUnit; /** * A factory to build {@link org.apache.flink.core.state.InternalStateFuture}, this will auto * wire the created future with mailbox executor. Also conducting the context switch. */ private final StateFutureFactory<R, K> stateFutureFactory; /** The state executor where the {@link StateRequest} is actually executed. */ final StateExecutor stateExecutor; /** The corresponding context that currently runs in task thread. */ RecordContext<R, K> currentContext; /** The buffer to store the state requests to execute in batch. */ StateRequestsBuffer<R, K> stateRequestsBuffer; /** The number of in-flight records. */ final AtomicInteger inFlightRecordNum; public AsyncExecutionController(MailboxExecutor mailboxExecutor, StateExecutor stateExecutor) { this(mailboxExecutor, stateExecutor, DEFAULT_BATCH_SIZE, DEFAULT_MAX_IN_FLIGHT_RECORD_NUM); } public AsyncExecutionController( MailboxExecutor mailboxExecutor, StateExecutor stateExecutor, int batchSize, int maxInFlightRecords) { this.keyAccountingUnit = new KeyAccountingUnit<>(); this.mailboxExecutor = mailboxExecutor; this.stateFutureFactory = new StateFutureFactory<>(this, mailboxExecutor); this.stateExecutor = stateExecutor; this.batchSize = batchSize; this.maxInFlightRecordNum = maxInFlightRecords; this.stateRequestsBuffer = new StateRequestsBuffer<>(); this.inFlightRecordNum = new AtomicInteger(0); LOG.info("Create AsyncExecutionController: maxInFlightRecordsNum {}", maxInFlightRecords); } /** * Build a new context based on record and key. Also wired with internal {@link * KeyAccountingUnit}. * * @param record the given record. * @param key the given key. * @return the built record context. */ public RecordContext<R, K> buildContext(R record, K key) { return new RecordContext<>(record, key, this::disposeContext); } /** * Each time before a code segment (callback) is about to run in mailbox (task thread), this * method should be called to switch a context in AEC. * * @param switchingContext the context to switch. */ public void setCurrentContext(RecordContext<R, K> switchingContext) { currentContext = switchingContext; } /** * Dispose a context. * * @param toDispose the context to dispose. */ public void disposeContext(RecordContext<R, K> toDispose) { keyAccountingUnit.release(toDispose.getRecord(), toDispose.getKey()); inFlightRecordNum.decrementAndGet(); RecordContext<R, K> nextRecordCtx = stateRequestsBuffer.tryMigrateSpecificOne(toDispose.getKey()); if (nextRecordCtx != null) { Preconditions.checkState( tryOccupyKey(nextRecordCtx), String.format("key(%s) is already occupied.", nextRecordCtx.getKey())); } } /** * Try to occupy a key by a given context. 
* * @param recordContext the given context. * @return true if occupy succeed or the key has already occupied by this context. */ boolean tryOccupyKey(RecordContext<R, K> recordContext) { boolean occupied = recordContext.isKeyOccupied(); if (!occupied && keyAccountingUnit.occupy(recordContext.getRecord(), recordContext.getKey())) { recordContext.setKeyOccupied(); occupied = true; } return occupied; } /** * Submit a {@link StateRequest} to this AEC and trigger if needed. * * @param state the state to request. * @param type the type of this request. * @param payload the payload input for this request. * @return the state future. */ public <IN, OUT> InternalStateFuture<OUT> handleRequest( @Nullable State state, StateRequestType type, @Nullable IN payload) { InternalStateFuture<OUT> stateFuture = stateFutureFactory.build(currentContext); StateRequest<K, IN, OUT> request = new StateRequest<>(state, type, payload, stateFuture, currentContext); seizeCapacity(); if (tryOccupyKey(currentContext)) { insertActiveBuffer(request); } else { insertBlockingBuffer(request); } triggerIfNeeded(false); return stateFuture; } <IN, OUT> void insertActiveBuffer(StateRequest<K, IN, OUT> request) { stateRequestsBuffer.enqueueToActive(request); } <IN, OUT> void insertBlockingBuffer(StateRequest<K, IN, OUT> request) { stateRequestsBuffer.enqueueToBlocking(request); } /** * Trigger a batch of requests. * * @param force whether to trigger requests in force. */ void triggerIfNeeded(boolean force) { if (!force && stateRequestsBuffer.activeQueueSize() < batchSize) { return; } List<StateRequest<?, ?, ?>> toRun = stateRequestsBuffer.nextToRun(); stateRequestsBuffer.clearActiveQueue(); stateExecutor.executeBatchRequests(toRun); } }
class AsyncExecutionController<R, K> { private static final Logger LOG = LoggerFactory.getLogger(AsyncExecutionController.class); public static final int DEFAULT_BATCH_SIZE = 1000; public static final int DEFAULT_MAX_IN_FLIGHT_RECORD_NUM = 6000; /** * The batch size. When the number of state requests in the active buffer exceeds the batch * size, a batched state execution would be triggered. */ private final int batchSize; /** The max allowed number of in-flight records. */ private final int maxInFlightRecordNum; /** * The mailbox executor borrowed from {@code StreamTask}. Keeping the reference of * mailboxExecutor here is to restrict the number of in-flight records, when the number of * in-flight records > {@link * blocked. */ private final MailboxExecutor mailboxExecutor; /** The key accounting unit which is used to detect the key conflict. */ final KeyAccountingUnit<R, K> keyAccountingUnit; /** * A factory to build {@link org.apache.flink.core.state.InternalStateFuture}, this will auto * wire the created future with mailbox executor. Also conducting the context switch. */ private final StateFutureFactory<R, K> stateFutureFactory; /** The state executor where the {@link StateRequest} is actually executed. */ final StateExecutor stateExecutor; /** The corresponding context that currently runs in task thread. */ RecordContext<R, K> currentContext; /** The buffer to store the state requests to execute in batch. */ StateRequestBuffer<R, K> stateRequestsBuffer; /** * The number of in-flight records. Including the records in active buffer and blocking buffer. */ final AtomicInteger inFlightRecordNum; public AsyncExecutionController(MailboxExecutor mailboxExecutor, StateExecutor stateExecutor) { this(mailboxExecutor, stateExecutor, DEFAULT_BATCH_SIZE, DEFAULT_MAX_IN_FLIGHT_RECORD_NUM); } public AsyncExecutionController( MailboxExecutor mailboxExecutor, StateExecutor stateExecutor, int batchSize, int maxInFlightRecords) { this.keyAccountingUnit = new KeyAccountingUnit<>(maxInFlightRecords); this.mailboxExecutor = mailboxExecutor; this.stateFutureFactory = new StateFutureFactory<>(this, mailboxExecutor); this.stateExecutor = stateExecutor; this.batchSize = batchSize; this.maxInFlightRecordNum = maxInFlightRecords; this.stateRequestsBuffer = new StateRequestBuffer<>(); this.inFlightRecordNum = new AtomicInteger(0); LOG.info( "Create AsyncExecutionController: batchSize {}, maxInFlightRecordsNum {}", batchSize, maxInFlightRecords); } /** * Build a new context based on record and key. Also wired with internal {@link * KeyAccountingUnit}. * * @param record the given record. * @param key the given key. * @return the built record context. */ public RecordContext<R, K> buildContext(R record, K key) { return new RecordContext<>(record, key, this::disposeContext); } /** * Each time before a code segment (callback) is about to run in mailbox (task thread), this * method should be called to switch a context in AEC. * * @param switchingContext the context to switch. */ public void setCurrentContext(RecordContext<R, K> switchingContext) { currentContext = switchingContext; } /** * Dispose a context. * * @param toDispose the context to dispose. 
*/ public void disposeContext(RecordContext<R, K> toDispose) { keyAccountingUnit.release(toDispose.getRecord(), toDispose.getKey()); inFlightRecordNum.decrementAndGet(); RecordContext<R, K> nextRecordCtx = stateRequestsBuffer.tryActivateOneByKey(toDispose.getKey()); if (nextRecordCtx != null) { Preconditions.checkState( tryOccupyKey(nextRecordCtx), String.format("key(%s) is already occupied.", nextRecordCtx.getKey())); } } /** * Try to occupy a key by a given context. * * @param recordContext the given context. * @return true if occupy succeed or the key has already occupied by this context. */ boolean tryOccupyKey(RecordContext<R, K> recordContext) { boolean occupied = recordContext.isKeyOccupied(); if (!occupied && keyAccountingUnit.occupy(recordContext.getRecord(), recordContext.getKey())) { recordContext.setKeyOccupied(); occupied = true; } return occupied; } /** * Submit a {@link StateRequest} to this AEC and trigger if needed. * * @param state the state to request. Could be {@code null} if the type is {@link * StateRequestType * @param type the type of this request. * @param payload the payload input for this request. * @return the state future. */ public <IN, OUT> InternalStateFuture<OUT> handleRequest( @Nullable State state, StateRequestType type, @Nullable IN payload) { InternalStateFuture<OUT> stateFuture = stateFutureFactory.create(currentContext); StateRequest<K, IN, OUT> request = new StateRequest<>(state, type, payload, stateFuture, currentContext); seizeCapacity(); if (tryOccupyKey(currentContext)) { insertActiveBuffer(request); } else { insertBlockingBuffer(request); } triggerIfNeeded(false); return stateFuture; } <IN, OUT> void insertActiveBuffer(StateRequest<K, IN, OUT> request) { stateRequestsBuffer.enqueueToActive(request); } <IN, OUT> void insertBlockingBuffer(StateRequest<K, IN, OUT> request) { stateRequestsBuffer.enqueueToBlocking(request); } /** * Trigger a batch of requests. * * @param force whether to trigger requests in force. */ void triggerIfNeeded(boolean force) { if (!force && stateRequestsBuffer.activeQueueSize() < batchSize) { return; } List<StateRequest<?, ?, ?>> toRun = stateRequestsBuffer.popActive(batchSize); stateExecutor.executeBatchRequests(toRun); } }
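The pair of seizeCapacity bodies above shows the fix under review: the back-off sleep shortened to 1 ms and the InterruptedException handling moved out of the capacity loop, where the exception is now ignored. A common alternative to silently swallowing the exception is to restore the thread's interrupt status so callers can still observe it. A minimal, self-contained sketch of that pattern follows; the CapacityGate class and its tryYield supplier are hypothetical stand-ins for the Flink MailboxExecutor machinery, not part of the code above.

import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.BooleanSupplier;

public class CapacityGate {
    private final AtomicInteger inFlight = new AtomicInteger(0);
    private final int maxInFlight;
    private final BooleanSupplier tryYield; // hypothetical stand-in for mailboxExecutor.tryYield()

    public CapacityGate(int maxInFlight, BooleanSupplier tryYield) {
        this.maxInFlight = maxInFlight;
        this.tryYield = tryYield;
    }

    /** Blocks until a slot is free; preserves the interrupt flag instead of swallowing it. */
    public void seize() {
        while (inFlight.get() >= maxInFlight) {
            if (!tryYield.getAsBoolean()) {
                try {
                    Thread.sleep(1); // brief back-off, mirroring the reviewed code
                } catch (InterruptedException e) {
                    // Restore the interrupt status so code further up the stack can react to it.
                    Thread.currentThread().interrupt();
                    return;
                }
            }
        }
        inFlight.incrementAndGet();
    }

    public void release() {
        inFlight.decrementAndGet();
    }
}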
You can use `Objects.requireNonNull` to ensure that the parameter cannot be null.
public InPredicate(Expression compareExpr, List<Expression> optionsList) { super(new Builder<Expression>().add(compareExpr).addAll(optionsList).build().toArray(new Expression[0])); this.compareExpr = compareExpr; this.optionsList = ImmutableList.copyOf(Objects.requireNonNull(optionsList, "In list cannot be null")); }
this.compareExpr = compareExpr;
public InPredicate(Expression compareExpr, List<Expression> optionsList) { super(new Builder<Expression>().add(compareExpr).addAll(optionsList).build().toArray(new Expression[0])); this.compareExpr = Objects.requireNonNull(compareExpr, "Compare Expr cannot be null"); this.options = ImmutableList.copyOf(Objects.requireNonNull(optionsList, "In list cannot be null")); }
class InPredicate extends Expression { private Expression compareExpr; private List<Expression> optionsList; public <R, C> R accept(ExpressionVisitor<R, C> visitor, C context) { return visitor.visitInPredicate(this, context); } @Override public DataType getDataType() throws UnboundException { return BooleanType.INSTANCE; } @Override public boolean nullable() throws UnboundException { return optionsList.stream().map(Expression::nullable) .reduce((a, b) -> a || b).get(); } @Override public String toString() { return compareExpr + " IN " + optionsList.stream() .map(Expression::toString) .collect(Collectors.joining(",", "(", ")")); } @Override public String toSql() { return compareExpr.toSql() + " IN " + optionsList.stream() .map(Expression::toSql) .collect(Collectors.joining(",", "(", ")")); } public Expression getCompareExpr() { return compareExpr; } public List<Expression> getOptionsList() { return optionsList; } }
class InPredicate extends Expression { private final Expression compareExpr; private final List<Expression> options; public <R, C> R accept(ExpressionVisitor<R, C> visitor, C context) { return visitor.visitInPredicate(this, context); } @Override public DataType getDataType() throws UnboundException { return BooleanType.INSTANCE; } @Override public boolean nullable() throws UnboundException { return children().stream().anyMatch(Expression::nullable); } @Override public Expression withChildren(List<Expression> children) { Preconditions.checkArgument(children.size() > 1); return new InPredicate(children.get(0), ImmutableList.copyOf(children).subList(1, children.size())); } @Override public String toString() { return compareExpr + " IN " + options.stream() .map(Expression::toString) .collect(Collectors.joining(", ", "(", ")")); } @Override public String toSql() { return compareExpr.toSql() + " IN " + options.stream() .map(Expression::toSql) .collect(Collectors.joining(", ", "(", ")")); } @Override public boolean equals(Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } InPredicate that = (InPredicate) o; return Objects.equals(compareExpr, that.getCompareExpr()) && Objects.equals(options, that.getOptions()); } @Override public int hashCode() { return Objects.hash(compareExpr, options); } public Expression getCompareExpr() { return compareExpr; } public List<Expression> getOptions() { return options; } }
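As the fixed constructor above shows, `Objects.requireNonNull` both documents the non-null contract and fails fast with a descriptive message instead of a later, harder-to-trace NullPointerException. Below is a minimal standalone illustration of the same pattern; the In class and its field names are hypothetical, not the Doris types above.

import java.util.Collections;
import java.util.List;
import java.util.Objects;

final class In {
    private final String compareExpr;
    private final List<String> options;

    In(String compareExpr, List<String> options) {
        // requireNonNull throws immediately, at construction time, with a clear message.
        this.compareExpr = Objects.requireNonNull(compareExpr, "Compare expr cannot be null");
        // Wrap the validated list so the field also stays immutable after construction.
        this.options = Collections.unmodifiableList(
                Objects.requireNonNull(options, "In list cannot be null"));
    }
}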
The logical aggregate above could have a project operator.
private Void tryGatherForBroadcastJoin(PhysicalHashJoinOperator node, ExpressionContext context) { List<Pair<PhysicalPropertySet, List<PhysicalPropertySet>>> result = Lists.newArrayList(); if ((context.getChildOperator(0) instanceof LogicalAggregationOperator)) { LogicalAggregationOperator childOperator = (LogicalAggregationOperator) context.getChildOperator(0); if (childOperator.getType().equals(AggType.GLOBAL) && childOperator.getGroupingKeys().isEmpty()) { for (Pair<PhysicalPropertySet, List<PhysicalPropertySet>> outputInputProp : outputInputProps) { PhysicalPropertySet left = outputInputProp.second.get(0); PhysicalPropertySet right = outputInputProp.second.get(1); if (left.getDistributionProperty().isAny() && right.getDistributionProperty().isBroadcast()) { result.add( new Pair<>(distributeRequirements(), Lists.newArrayList(distributeRequirements(), right))); } } outputInputProps = result; return visitOperator(node, context); } } return visitOperator(node, context); }
if ((context.getChildOperator(0) instanceof LogicalAggregationOperator)) {
private Void tryGatherForBroadcastJoin(PhysicalHashJoinOperator node, ExpressionContext context) { List<Pair<PhysicalPropertySet, List<PhysicalPropertySet>>> result = Lists.newArrayList(); if (context.getChildLogicalProperty(0).isGatherToOneInstance()) { for (Pair<PhysicalPropertySet, List<PhysicalPropertySet>> outputInputProp : outputInputProps) { PhysicalPropertySet left = outputInputProp.second.get(0); PhysicalPropertySet right = outputInputProp.second.get(1); if (left.getDistributionProperty().isAny() && right.getDistributionProperty().isBroadcast()) { result.add(new Pair<>(distributeRequirements(), Lists.newArrayList(distributeRequirements(), right))); } else { result.add(outputInputProp); } } outputInputProps = result; return visitOperator(node, context); } return visitOperator(node, context); }
class ChildPropertyDeriver extends OperatorVisitor<Void, ExpressionContext> { private PhysicalPropertySet requirements; private List<Pair<PhysicalPropertySet, List<PhysicalPropertySet>>> outputInputProps; private final TaskContext taskContext; private final OptimizerContext context; public ChildPropertyDeriver(TaskContext taskContext) { this.taskContext = taskContext; this.context = taskContext.getOptimizerContext(); } public List<Pair<PhysicalPropertySet, List<PhysicalPropertySet>>> getOutputInputProps( PhysicalPropertySet requirements, GroupExpression groupExpression) { this.requirements = requirements; outputInputProps = Lists.newArrayList(); groupExpression.getOp().accept(this, new ExpressionContext(groupExpression)); return outputInputProps; } private PhysicalPropertySet distributeRequirements() { return new PhysicalPropertySet(requirements.getDistributionProperty()); } @Override public Void visitOperator(Operator node, ExpressionContext context) { return null; } @Override public Void visitPhysicalHashJoin(PhysicalHashJoinOperator node, ExpressionContext context) { String hint = node.getJoinHint(); PhysicalPropertySet rightBroadcastProperty = new PhysicalPropertySet(new DistributionProperty(DistributionSpec.createReplicatedDistributionSpec())); LogicalOperator leftChild = (LogicalOperator) context.getChildOperator(0); LogicalOperator rightChild = (LogicalOperator) context.getChildOperator(1); if (leftChild.hasLimit() || rightChild.hasLimit()) { if (leftChild.hasLimit()) { outputInputProps.add(new Pair<>(PhysicalPropertySet.EMPTY, Lists.newArrayList(createLimitGatherProperty(leftChild.getLimit()), rightBroadcastProperty))); } else { outputInputProps.add(new Pair<>(PhysicalPropertySet.EMPTY, Lists.newArrayList(new PhysicalPropertySet(), rightBroadcastProperty))); } return visitJoinRequirements(node, context); } else { outputInputProps.add(new Pair<>(PhysicalPropertySet.EMPTY, Lists.newArrayList(new PhysicalPropertySet(), rightBroadcastProperty))); } ColumnRefSet leftChildColumns = context.getChildOutputColumns(0); ColumnRefSet rightChildColumns = context.getChildOutputColumns(1); List<BinaryPredicateOperator> equalOnPredicate = getEqConj(leftChildColumns, rightChildColumns, Utils.extractConjuncts(node.getJoinPredicate())); if (node.getJoinType().isCrossJoin() || JoinOperator.NULL_AWARE_LEFT_ANTI_JOIN.equals(node.getJoinType()) || (node.getJoinType().isInnerJoin() && equalOnPredicate.isEmpty()) || "BROADCAST".equalsIgnoreCase(hint)) { return visitJoinRequirements(node, context); } if (node.getJoinType().isRightJoin() || node.getJoinType().isFullOuterJoin() || "SHUFFLE".equalsIgnoreCase(hint)) { outputInputProps.clear(); } List<Integer> leftOnPredicateColumns = new ArrayList<>(); List<Integer> rightOnPredicateColumns = new ArrayList<>(); JoinPredicateUtils.getJoinOnPredicatesColumns(equalOnPredicate, leftChildColumns, rightChildColumns, leftOnPredicateColumns, rightOnPredicateColumns); Preconditions.checkState(leftOnPredicateColumns.size() == rightOnPredicateColumns.size()); HashDistributionSpec leftDistribution = DistributionSpec.createHashDistributionSpec( new HashDistributionDesc(leftOnPredicateColumns, HashDistributionDesc.SourceType.SHUFFLE_JOIN)); HashDistributionSpec rightDistribution = DistributionSpec.createHashDistributionSpec( new HashDistributionDesc(rightOnPredicateColumns, HashDistributionDesc.SourceType.SHUFFLE_JOIN)); doHashShuffle(equalOnPredicate, leftDistribution, rightDistribution); if ("SHUFFLE".equalsIgnoreCase(hint)) { return visitJoinRequirements(node, context); } 
if (equalOnPredicate.stream().anyMatch(p -> !isColumnToColumnBinaryPredicate(p))) { return visitJoinRequirements(node, context); } if (!"BUCKET".equalsIgnoreCase(hint)) { tryColocate(leftDistribution, rightDistribution); } tryBucketShuffle(node, leftDistribution, rightDistribution); return visitJoinRequirements(node, context); } private void doHashShuffle(List<BinaryPredicateOperator> equalOnPredicate, HashDistributionSpec leftDistribution, HashDistributionSpec rightDistribution) { if (equalOnPredicate.stream().anyMatch(p -> !isColumnToColumnBinaryPredicate(p))) { PhysicalPropertySet leftProperty = createPropertySetByDistribution(new HashDistributionSpec( new HashDistributionDesc(leftDistribution.getShuffleColumns(), HashDistributionDesc.SourceType.FORCE_SHUFFLE_JOIN))); PhysicalPropertySet rightProperty = createPropertySetByDistribution(new HashDistributionSpec( new HashDistributionDesc(rightDistribution.getShuffleColumns(), HashDistributionDesc.SourceType.FORCE_SHUFFLE_JOIN))); outputInputProps.add(new Pair<>(PhysicalPropertySet.EMPTY, Lists.newArrayList(leftProperty, rightProperty))); return; } PhysicalPropertySet leftInputProperty = createPropertySetByDistribution(leftDistribution); PhysicalPropertySet rightInputProperty = createPropertySetByDistribution(rightDistribution); Optional<HashDistributionDesc> requiredShuffleDesc = getRequiredShuffleJoinDesc(); if (!requiredShuffleDesc.isPresent()) { outputInputProps.add(new Pair<>(PhysicalPropertySet.EMPTY, Lists.newArrayList(leftInputProperty, rightInputProperty))); return; } HashDistributionDesc requiredDesc = requiredShuffleDesc.get(); List<Integer> leftColumns = leftDistribution.getShuffleColumns(); List<Integer> rightColumns = rightDistribution.getShuffleColumns(); List<Integer> requiredColumns = requiredDesc.getColumns(); Preconditions.checkState(leftColumns.size() == rightColumns.size()); boolean checkLeft = leftColumns.containsAll(requiredColumns) && leftColumns.size() == requiredColumns.size(); boolean checkRight = rightColumns.containsAll(requiredColumns) && rightColumns.size() == requiredColumns.size(); if (checkLeft || checkRight) { List<Integer> requiredLeft = Lists.newArrayList(); List<Integer> requiredRight = Lists.newArrayList(); for (Integer cid : requiredColumns) { int idx = checkLeft ? 
leftColumns.indexOf(cid) : rightColumns.indexOf(cid); requiredLeft.add(leftColumns.get(idx)); requiredRight.add(rightColumns.get(idx)); } PhysicalPropertySet leftShuffleProperty = createPropertySetByDistribution( DistributionSpec.createHashDistributionSpec(new HashDistributionDesc(requiredLeft, HashDistributionDesc.SourceType.SHUFFLE_JOIN))); PhysicalPropertySet rightShuffleProperty = createPropertySetByDistribution( DistributionSpec.createHashDistributionSpec(new HashDistributionDesc(requiredRight, HashDistributionDesc.SourceType.SHUFFLE_JOIN))); outputInputProps.add(new Pair<>(distributeRequirements(), Lists.newArrayList(leftShuffleProperty, rightShuffleProperty))); return; } outputInputProps .add(new Pair<>(PhysicalPropertySet.EMPTY, Lists.newArrayList(leftInputProperty, rightInputProperty))); } /* * Colocate will required children support local properties by topdown * All shuffle columns(predicate columns) must come from one table * Can't support case such as (The predicate columns combinations is N*N): * JOIN(s1.A = s3.A AND s2.A = s4.A) * / \ * JOIN JOIN * / \ / \ * s1 s2 s3 s4 * * support case: * JOIN(s1.A = s3.A AND s1.B = s3.B) * / \ * JOIN JOIN * / \ / \ * s1 s2 s3 s4 * * */ private void tryColocate(HashDistributionSpec leftShuffleDistribution, HashDistributionSpec rightShuffleDistribution) { if (Config.disable_colocate_join || ConnectContext.get().getSessionVariable().isDisableColocateJoin()) { return; } Optional<LogicalOlapScanOperator> leftTable = findLogicalOlapScanOperator(leftShuffleDistribution); if (!leftTable.isPresent()) { return; } LogicalOlapScanOperator left = leftTable.get(); Optional<LogicalOlapScanOperator> rightTable = findLogicalOlapScanOperator(rightShuffleDistribution); if (!rightTable.isPresent()) { return; } LogicalOlapScanOperator right = rightTable.get(); ColocateTableIndex colocateIndex = Catalog.getCurrentColocateIndex(); if (left.getTable().getId() == right.getTable().getId() && !colocateIndex.isSameGroup(left.getTable().getId(), right.getTable().getId())) { if (!left.getSelectedPartitionId().equals(right.getSelectedPartitionId()) || left.getSelectedPartitionId().size() > 1) { return; } PhysicalPropertySet rightLocalProperty = createPropertySetByDistribution( createLocalByByHashColumns(rightShuffleDistribution.getShuffleColumns())); PhysicalPropertySet leftLocalProperty = createPropertySetByDistribution( createLocalByByHashColumns(leftShuffleDistribution.getShuffleColumns())); outputInputProps.add(new Pair<>(PhysicalPropertySet.EMPTY, Lists.newArrayList(leftLocalProperty, rightLocalProperty))); } else { if (!colocateIndex.isSameGroup(left.getTable().getId(), right.getTable().getId())) { return; } ColocateTableIndex.GroupId groupId = colocateIndex.getGroup(left.getTable().getId()); if (colocateIndex.isGroupUnstable(groupId)) { return; } HashDistributionSpec leftScanDistribution = left.getDistributionSpec(); HashDistributionSpec rightScanDistribution = right.getDistributionSpec(); Preconditions.checkState(leftScanDistribution.getShuffleColumns().size() == rightScanDistribution.getShuffleColumns().size()); if (!leftShuffleDistribution.getShuffleColumns().containsAll(leftScanDistribution.getShuffleColumns())) { return; } if (!rightShuffleDistribution.getShuffleColumns().containsAll(rightScanDistribution.getShuffleColumns())) { return; } for (int i = 0; i < leftScanDistribution.getShuffleColumns().size(); i++) { int leftScanColumnId = leftScanDistribution.getShuffleColumns().get(i); int leftIndex = 
leftShuffleDistribution.getShuffleColumns().indexOf(leftScanColumnId); int rightScanColumnId = rightScanDistribution.getShuffleColumns().get(i); int rightIndex = rightShuffleDistribution.getShuffleColumns().indexOf(rightScanColumnId); if (leftIndex != rightIndex) { return; } } PhysicalPropertySet rightLocalProperty = createPropertySetByDistribution( createLocalByByHashColumns(rightScanDistribution.getShuffleColumns())); PhysicalPropertySet leftLocalProperty = createPropertySetByDistribution( createLocalByByHashColumns(leftScanDistribution.getShuffleColumns())); outputInputProps.add(new Pair<>(PhysicalPropertySet.EMPTY, Lists.newArrayList(leftLocalProperty, rightLocalProperty))); } } /* * Bucket-shuffle will required left-children support local properties by topdown * All shuffle columns(predicate columns) must come from one table * Can't support case such as (The predicate columns combinations is N*N): * JOIN(s1.A = s3.A AND s2.A = s4.A) * / \ * JOIN JOIN * / \ / \ * s1 s2 s3 s4 * * support case: * JOIN(s1.A = s3.A AND s1.B = s3.B) * / \ * JOIN JOIN * / \ / \ * s1 s2 s3 s4 * * */ private void tryBucketShuffle(PhysicalHashJoinOperator node, HashDistributionSpec leftShuffleDistribution, HashDistributionSpec rightShuffleDistribution) { JoinOperator nodeJoinType = node.getJoinType(); if (nodeJoinType.isCrossJoin()) { return; } Optional<LogicalOlapScanOperator> leftTable = findLogicalOlapScanOperator(leftShuffleDistribution); if (!leftTable.isPresent()) { return; } LogicalOlapScanOperator left = leftTable.get(); if (left.getSelectedPartitionId().size() != 1) { return; } HashDistributionSpec leftScanDistribution = left.getDistributionSpec(); if (!leftShuffleDistribution.getShuffleColumns().containsAll(leftScanDistribution.getShuffleColumns())) { return; } List<Integer> rightBucketShuffleColumns = Lists.newArrayList(); for (int leftScanColumn : leftScanDistribution.getShuffleColumns()) { int index = leftShuffleDistribution.getShuffleColumns().indexOf(leftScanColumn); rightBucketShuffleColumns.add(rightShuffleDistribution.getShuffleColumns().get(index)); } List<Integer> leftLocalColumns = Lists.newArrayList(leftScanDistribution.getShuffleColumns()); PhysicalPropertySet rightBucketShuffleProperty = createPropertySetByDistribution( DistributionSpec.createHashDistributionSpec(new HashDistributionDesc(rightBucketShuffleColumns, HashDistributionDesc.SourceType.BUCKET_JOIN))); PhysicalPropertySet leftLocalProperty = createPropertySetByDistribution(createLocalByByHashColumns(leftLocalColumns)); outputInputProps.add(new Pair<>(PhysicalPropertySet.EMPTY, Lists.newArrayList(leftLocalProperty, rightBucketShuffleProperty))); } private Optional<LogicalOlapScanOperator> findLogicalOlapScanOperator(HashDistributionSpec distributionSpec) { /* * All shuffle columns must come from one table * */ List<ColumnRefOperator> shuffleColumns = distributionSpec.getShuffleColumns().stream().map(d -> context.getColumnRefFactory().getColumnRef(d)) .collect(Collectors.toList()); for (LogicalOlapScanOperator scanOperator : taskContext.getAllScanOperators()) { if (scanOperator.getOutputColumns().containsAll(shuffleColumns)) { return Optional.of(scanOperator); } } return Optional.empty(); } private HashDistributionSpec createLocalByByHashColumns(List<Integer> hashColumns) { HashDistributionDesc hashDesc = new HashDistributionDesc(hashColumns, HashDistributionDesc.SourceType.LOCAL); return DistributionSpec.createHashDistributionSpec(hashDesc); } private Optional<HashDistributionDesc> getRequiredLocalDesc() { if 
(!requirements.getDistributionProperty().isShuffle()) { return Optional.empty(); } HashDistributionDesc requireDistributionDesc = ((HashDistributionSpec) requirements.getDistributionProperty().getSpec()).getHashDistributionDesc(); if (!HashDistributionDesc.SourceType.LOCAL.equals(requireDistributionDesc.getSourceType())) { return Optional.empty(); } return Optional.of(requireDistributionDesc); } private Optional<GatherDistributionSpec> getRequiredGatherDesc() { if (!requirements.getDistributionProperty().isGather()) { return Optional.empty(); } GatherDistributionSpec requireDistributionDesc = ((GatherDistributionSpec) requirements.getDistributionProperty().getSpec()); return Optional.of(requireDistributionDesc); } private Optional<HashDistributionDesc> getRequiredShuffleJoinDesc() { if (!requirements.getDistributionProperty().isShuffle()) { return Optional.empty(); } HashDistributionDesc requireDistributionDesc = ((HashDistributionSpec) requirements.getDistributionProperty().getSpec()).getHashDistributionDesc(); if (!HashDistributionDesc.SourceType.SHUFFLE_JOIN.equals(requireDistributionDesc.getSourceType())) { return Optional.empty(); } return Optional.of(requireDistributionDesc); } private Void visitJoinRequirements(PhysicalHashJoinOperator node, ExpressionContext context) { Optional<GatherDistributionSpec> requiredGatherDistribution = getRequiredGatherDesc(); if (requiredGatherDistribution.isPresent()) { return tryGatherForBroadcastJoin(node, context); } Optional<HashDistributionDesc> required = getRequiredLocalDesc(); if (!required.isPresent()) { return visitOperator(node, context); } HashDistributionDesc requireDistributionDesc = required.get(); ColumnRefSet requiredLocalColumns = new ColumnRefSet(); requireDistributionDesc.getColumns().forEach(requiredLocalColumns::union); ColumnRefSet leftChildColumns = context.getChildOutputColumns(0); ColumnRefSet rightChildColumns = context.getChildOutputColumns(1); boolean requiredLocalColumnsFromLeft = leftChildColumns.contains(requiredLocalColumns); boolean requiredLocalColumnsFromRight = rightChildColumns.contains(requiredLocalColumns); if (requiredLocalColumnsFromLeft == requiredLocalColumnsFromRight) { outputInputProps.clear(); return visitOperator(node, context); } List<Pair<PhysicalPropertySet, List<PhysicalPropertySet>>> result = Lists.newArrayList(); if (requiredLocalColumnsFromLeft) { for (Pair<PhysicalPropertySet, List<PhysicalPropertySet>> outputInputProp : outputInputProps) { PhysicalPropertySet left = outputInputProp.second.get(0); PhysicalPropertySet right = outputInputProp.second.get(1); if (left.getDistributionProperty().isAny()) { result.add( new Pair<>(distributeRequirements(), Lists.newArrayList(distributeRequirements(), right))); } else if (left.getDistributionProperty().isShuffle()) { HashDistributionDesc desc = ((HashDistributionSpec) left.getDistributionProperty().getSpec()).getHashDistributionDesc(); if (desc.getSourceType() == HashDistributionDesc.SourceType.LOCAL && requireDistributionDesc.getColumns().containsAll(desc.getColumns())) { result.add(new Pair<>(distributeRequirements(), outputInputProp.second)); } } } } else { for (Pair<PhysicalPropertySet, List<PhysicalPropertySet>> outputInputProp : outputInputProps) { PhysicalPropertySet right = outputInputProp.second.get(1); if (right.getDistributionProperty().isShuffle()) { HashDistributionDesc desc = ((HashDistributionSpec) right.getDistributionProperty().getSpec()) .getHashDistributionDesc(); if (desc.getSourceType() == HashDistributionDesc.SourceType.LOCAL && 
requireDistributionDesc.getColumns().containsAll(desc.getColumns())) { result.add(new Pair<>(distributeRequirements(), outputInputProp.second)); } } } } outputInputProps = result; return visitOperator(node, context); } @Override public Void visitPhysicalProject(PhysicalProjectOperator node, ExpressionContext context) { outputInputProps.add(new Pair<>(distributeRequirements(), Lists.newArrayList(distributeRequirements()))); if (getRequiredLocalDesc().isPresent()) { return visitOperator(node, context); } outputInputProps.add(new Pair<>(PhysicalPropertySet.EMPTY, Lists.newArrayList(PhysicalPropertySet.EMPTY))); return visitOperator(node, context); } @Override public Void visitPhysicalHashAggregate(PhysicalHashAggregateOperator node, ExpressionContext context) { if (ConnectContext.get().getSessionVariable().getNewPlannerAggStage() == 0 && context.getRootProperty().isExecuteInOneInstance() && node.getType().isGlobal() && !node.isSplit()) { outputInputProps.add(new Pair<>(PhysicalPropertySet.EMPTY, Lists.newArrayList(PhysicalPropertySet.EMPTY))); return visitAggregateRequirements(node, context); } LogicalOperator child = (LogicalOperator) context.getChildOperator(0); if (child.hasLimit() && (node.getType().isGlobal() && !node.isSplit())) { outputInputProps.add(new Pair<>(PhysicalPropertySet.EMPTY, Lists.newArrayList(createLimitGatherProperty(child.getLimit())))); return visitAggregateRequirements(node, context); } if (!node.getType().isLocal()) { List<Integer> columns = node.getPartitionByColumns().stream().map(ColumnRefOperator::getId).collect( Collectors.toList()); if (columns.isEmpty()) { DistributionProperty distributionProperty = new DistributionProperty(DistributionSpec.createGatherDistributionSpec()); outputInputProps.add(new Pair<>(new PhysicalPropertySet(distributionProperty), Lists.newArrayList(new PhysicalPropertySet(distributionProperty)))); return visitAggregateRequirements(node, context); } DistributionSpec distributionSpec = DistributionSpec.createHashDistributionSpec( new HashDistributionDesc(columns, HashDistributionDesc.SourceType.SHUFFLE_AGG)); DistributionProperty distributionProperty = new DistributionProperty(distributionSpec); outputInputProps.add(new Pair<>(new PhysicalPropertySet(distributionProperty), Lists.newArrayList(new PhysicalPropertySet(distributionProperty)))); DistributionSpec localSpec = DistributionSpec.createHashDistributionSpec( new HashDistributionDesc(columns, HashDistributionDesc.SourceType.LOCAL)); DistributionProperty localProperty = new DistributionProperty(localSpec); outputInputProps.add(new Pair<>(new PhysicalPropertySet(localProperty), Lists.newArrayList(new PhysicalPropertySet(localProperty)))); return visitAggregateRequirements(node, context); } outputInputProps.add(new Pair<>(PhysicalPropertySet.EMPTY, Lists.newArrayList(PhysicalPropertySet.EMPTY))); return visitAggregateRequirements(node, context); } private Void visitAggregateRequirements(PhysicalHashAggregateOperator node, ExpressionContext context) { Optional<HashDistributionDesc> required = getRequiredLocalDesc(); if (!required.isPresent()) { return visitOperator(node, context); } HashDistributionDesc requireDistributionDesc = required.get(); ColumnRefSet requiredLocalColumns = new ColumnRefSet(); requireDistributionDesc.getColumns().forEach(requiredLocalColumns::union); List<Pair<PhysicalPropertySet, List<PhysicalPropertySet>>> result = Lists.newArrayList(); for (Pair<PhysicalPropertySet, List<PhysicalPropertySet>> outputInputProp : outputInputProps) { PhysicalPropertySet input = 
outputInputProp.second.get(0); if (input.getDistributionProperty().isAny()) { result.add(new Pair<>(distributeRequirements(), Lists.newArrayList(distributeRequirements()))); } else if (input.getDistributionProperty().isShuffle()) { HashDistributionDesc outputDesc = ((HashDistributionSpec) outputInputProp.first.getDistributionProperty().getSpec()) .getHashDistributionDesc(); HashDistributionDesc inputDesc = ((HashDistributionSpec) input.getDistributionProperty().getSpec()).getHashDistributionDesc(); if (outputDesc.getSourceType() == HashDistributionDesc.SourceType.LOCAL && inputDesc.getSourceType() == HashDistributionDesc.SourceType.LOCAL && requireDistributionDesc.getColumns().containsAll(inputDesc.getColumns())) { result.add(new Pair<>(distributeRequirements(), outputInputProp.second)); } } } outputInputProps = result; return visitOperator(node, context); } @Override public Void visitPhysicalOlapScan(PhysicalOlapScanOperator node, ExpressionContext context) { HashDistributionSpec hashDistributionSpec = node.getDistributionSpec(); ColocateTableIndex colocateIndex = Catalog.getCurrentColocateIndex(); if (node.getSelectedPartitionId().size() > 1 && !colocateIndex.isColocateTable(node.getTable().getId())) { outputInputProps.add(new Pair<>(PhysicalPropertySet.EMPTY, Lists.newArrayList())); } else { outputInputProps .add(new Pair<>(createPropertySetByDistribution(hashDistributionSpec), Lists.newArrayList())); } Optional<HashDistributionDesc> required = getRequiredLocalDesc(); if (!required.isPresent()) { return visitOperator(node, context); } outputInputProps.clear(); HashDistributionDesc requireDistributionDesc = required.get(); if (requireDistributionDesc.getColumns().containsAll(hashDistributionSpec.getShuffleColumns())) { outputInputProps.add(new Pair<>(distributeRequirements(), Lists.newArrayList())); } return visitOperator(node, context); } @Override public Void visitPhysicalTopN(PhysicalTopNOperator topN, ExpressionContext context) { if (getRequiredLocalDesc().isPresent()) { return visitOperator(topN, context); } PhysicalPropertySet outputProperty; if (topN.getSortPhase().isFinal()) { if (topN.isSplit()) { DistributionSpec distributionSpec = DistributionSpec.createGatherDistributionSpec(); DistributionProperty distributionProperty = new DistributionProperty(distributionSpec); SortProperty sortProperty = new SortProperty(topN.getOrderSpec()); outputProperty = new PhysicalPropertySet(distributionProperty, sortProperty); } else { outputProperty = new PhysicalPropertySet(new SortProperty(topN.getOrderSpec())); } } else { outputProperty = new PhysicalPropertySet(); } LogicalOperator child = (LogicalOperator) context.getChildOperator(0); if (child.hasLimit() && (topN.getSortPhase().isFinal() && !topN.isSplit())) { PhysicalPropertySet inputProperty = createLimitGatherProperty(child.getLimit()); outputInputProps.add(new Pair<>(outputProperty, Lists.newArrayList(inputProperty))); } else { outputInputProps.add(new Pair<>(outputProperty, Lists.newArrayList(PhysicalPropertySet.EMPTY))); } return visitOperator(topN, context); } @Override public Void visitPhysicalHiveScan(PhysicalHiveScanOperator node, ExpressionContext context) { if (getRequiredLocalDesc().isPresent()) { return visitOperator(node, context); } outputInputProps.add(new Pair<>(PhysicalPropertySet.EMPTY, Lists.newArrayList())); return visitOperator(node, context); } @Override public Void visitPhysicalSchemaScan(PhysicalSchemaScanOperator node, ExpressionContext context) { if (getRequiredLocalDesc().isPresent()) { return 
visitOperator(node, context); } outputInputProps.add(new Pair<>(PhysicalPropertySet.EMPTY, Lists.newArrayList())); return visitOperator(node, context); } @Override public Void visitPhysicalMysqlScan(PhysicalMysqlScanOperator node, ExpressionContext context) { if (getRequiredLocalDesc().isPresent()) { return visitOperator(node, context); } outputInputProps.add(new Pair<>(PhysicalPropertySet.EMPTY, Lists.newArrayList())); return visitOperator(node, context); } @Override public Void visitPhysicalEsScan(PhysicalEsScanOperator node, ExpressionContext context) { if (getRequiredLocalDesc().isPresent()) { return visitOperator(node, context); } outputInputProps.add(new Pair<>(PhysicalPropertySet.EMPTY, Lists.newArrayList())); return visitOperator(node, context); } @Override public Void visitPhysicalAssertOneRow(PhysicalAssertOneRowOperator node, ExpressionContext context) { if (getRequiredLocalDesc().isPresent()) { return visitOperator(node, context); } DistributionSpec gather = DistributionSpec.createGatherDistributionSpec(); DistributionProperty inputProperty = new DistributionProperty(gather); outputInputProps.add(new Pair<>(PhysicalPropertySet.EMPTY, Lists.newArrayList(new PhysicalPropertySet(inputProperty)))); return visitOperator(node, context); } @Override public Void visitPhysicalAnalytic(PhysicalWindowOperator node, ExpressionContext context) { List<Integer> partitionColumnRefSet = new ArrayList<>(); List<Ordering> orderings = new ArrayList<>(); node.getPartitionExpressions().forEach(e -> { partitionColumnRefSet .addAll(Arrays.stream(e.getUsedColumns().getColumnIds()).boxed().collect(Collectors.toList())); orderings.add(new Ordering((ColumnRefOperator) e, true, true)); }); node.getOrderByElements().forEach(o -> { if (orderings.stream().noneMatch(ordering -> ordering.getColumnRef().equals(o.getColumnRef()))) { orderings.add(o); } }); SortProperty sortProperty = new SortProperty(new OrderSpec(orderings)); Optional<HashDistributionDesc> required = getRequiredLocalDesc(); if (required.isPresent()) { if (!partitionColumnRefSet.isEmpty() && required.get().getColumns().containsAll(partitionColumnRefSet)) { DistributionProperty localProperty = new DistributionProperty(DistributionSpec .createHashDistributionSpec(new HashDistributionDesc(partitionColumnRefSet, HashDistributionDesc.SourceType.LOCAL))); outputInputProps.add(new Pair<>(new PhysicalPropertySet(localProperty, sortProperty), Lists.newArrayList(new PhysicalPropertySet(localProperty, sortProperty)))); } return visitOperator(node, context); } if (partitionColumnRefSet.isEmpty()) { DistributionProperty distributionProperty = new DistributionProperty(DistributionSpec.createGatherDistributionSpec()); outputInputProps.add(new Pair<>(new PhysicalPropertySet(distributionProperty, sortProperty), Lists.newArrayList(new PhysicalPropertySet(distributionProperty, sortProperty)))); } else { DistributionProperty distributionProperty = new DistributionProperty(DistributionSpec .createHashDistributionSpec( new HashDistributionDesc(partitionColumnRefSet, HashDistributionDesc.SourceType.SHUFFLE_AGG))); outputInputProps.add(new Pair<>(new PhysicalPropertySet(distributionProperty, sortProperty), Lists.newArrayList(new PhysicalPropertySet(distributionProperty, sortProperty)))); DistributionProperty localProperty = new DistributionProperty(DistributionSpec.createHashDistributionSpec( new HashDistributionDesc(partitionColumnRefSet, HashDistributionDesc.SourceType.LOCAL))); outputInputProps.add(new Pair<>(new PhysicalPropertySet(localProperty, sortProperty), 
Lists.newArrayList(new PhysicalPropertySet(localProperty, sortProperty)))); } return visitOperator(node, context); } @Override public Void visitPhysicalUnion(PhysicalUnionOperator node, ExpressionContext context) { processSetOperationChildProperty(context); return visitOperator(node, context); } @Override public Void visitPhysicalExcept(PhysicalExceptOperator node, ExpressionContext context) { processSetOperationChildProperty(context); return visitOperator(node, context); } @Override public Void visitPhysicalIntersect(PhysicalIntersectOperator node, ExpressionContext context) { processSetOperationChildProperty(context); return visitOperator(node, context); } private void processSetOperationChildProperty(ExpressionContext context) { if (getRequiredLocalDesc().isPresent()) { return; } List<PhysicalPropertySet> childProperty = new ArrayList<>(); for (int i = 0; i < context.arity(); ++i) { LogicalOperator child = (LogicalOperator) context.getChildOperator(i); if (child.hasLimit()) { childProperty.add(createLimitGatherProperty(child.getLimit())); } else { childProperty.add(PhysicalPropertySet.EMPTY); } } outputInputProps.add(new Pair<>(PhysicalPropertySet.EMPTY, childProperty)); } @Override public Void visitPhysicalValues(PhysicalValuesOperator node, ExpressionContext context) { outputInputProps.add(new Pair<>(distributeRequirements(), Lists.newArrayList())); return visitOperator(node, context); } @Override public Void visitPhysicalRepeat(PhysicalRepeatOperator node, ExpressionContext context) { if (getRequiredLocalDesc().isPresent()) { outputInputProps.add(new Pair<>(distributeRequirements(), Lists.newArrayList(distributeRequirements()))); return visitOperator(node, context); } outputInputProps.add(new Pair<>(PhysicalPropertySet.EMPTY, Lists.newArrayList(PhysicalPropertySet.EMPTY))); return visitOperator(node, context); } @Override public Void visitPhysicalFilter(PhysicalFilterOperator node, ExpressionContext context) { if (getRequiredLocalDesc().isPresent()) { outputInputProps.add(new Pair<>(distributeRequirements(), Lists.newArrayList(distributeRequirements()))); return visitOperator(node, context); } outputInputProps.add(new Pair<>(PhysicalPropertySet.EMPTY, Lists.newArrayList(PhysicalPropertySet.EMPTY))); return visitOperator(node, context); } @Override public Void visitPhysicalTableFunction(PhysicalTableFunctionOperator node, ExpressionContext context) { if (getRequiredLocalDesc().isPresent()) { outputInputProps.add(new Pair<>(distributeRequirements(), Lists.newArrayList(distributeRequirements()))); return visitOperator(node, context); } outputInputProps.add(new Pair<>(PhysicalPropertySet.EMPTY, Lists.newArrayList(PhysicalPropertySet.EMPTY))); return visitOperator(node, context); } private PhysicalPropertySet createLimitGatherProperty(long limit) { DistributionSpec distributionSpec = DistributionSpec.createGatherDistributionSpec(limit); DistributionProperty distributionProperty = new DistributionProperty(distributionSpec); return new PhysicalPropertySet(distributionProperty, SortProperty.EMPTY); } private PhysicalPropertySet createPropertySetByDistribution(DistributionSpec distributionSpec) { DistributionProperty distributionProperty = new DistributionProperty(distributionSpec); return new PhysicalPropertySet(distributionProperty); } }
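The tail of tryColocate above only accepts a colocate join when the i-th bucketing column of the left scan sits at the same position inside the left join-shuffle columns as the i-th bucketing column of the right scan does inside the right join-shuffle columns; otherwise equal join keys could hash into different buckets. A minimal, self-contained sketch of that alignment test — the class and variable names are illustrative, not part of the repository:

import java.util.List;

public class ColocateAlignmentSketch {
    // Mirrors the index-alignment loop in tryColocate: returns false as soon
    // as a pair of corresponding bucketing columns occupies different
    // positions in the two join-shuffle column lists.
    static boolean shuffleColumnsAligned(List<Integer> leftScan, List<Integer> rightScan,
                                         List<Integer> leftShuffle, List<Integer> rightShuffle) {
        for (int i = 0; i < leftScan.size(); i++) {
            int leftIndex = leftShuffle.indexOf(leftScan.get(i));
            int rightIndex = rightShuffle.indexOf(rightScan.get(i));
            if (leftIndex != rightIndex) {
                return false; // mis-aligned: colocate execution is not safe
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // s1 bucketed on columns (1, 2), s3 on (10, 20); the predicate
        // s1.A = s3.A AND s1.B = s3.B shuffles on (1, 2) / (10, 20).
        System.out.println(shuffleColumnsAligned(
                List.of(1, 2), List.of(10, 20),
                List.of(1, 2), List.of(10, 20)));   // true
        // Reversing the predicate columns on one side breaks the alignment.
        System.out.println(shuffleColumnsAligned(
                List.of(1, 2), List.of(10, 20),
                List.of(1, 2), List.of(20, 10)));   // false
    }
}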
class ChildPropertyDeriver extends OperatorVisitor<Void, ExpressionContext> { private PhysicalPropertySet requirements; private List<Pair<PhysicalPropertySet, List<PhysicalPropertySet>>> outputInputProps; private final TaskContext taskContext; private final OptimizerContext context; public ChildPropertyDeriver(TaskContext taskContext) { this.taskContext = taskContext; this.context = taskContext.getOptimizerContext(); } public List<Pair<PhysicalPropertySet, List<PhysicalPropertySet>>> getOutputInputProps( PhysicalPropertySet requirements, GroupExpression groupExpression) { this.requirements = requirements; outputInputProps = Lists.newArrayList(); groupExpression.getOp().accept(this, new ExpressionContext(groupExpression)); return outputInputProps; } private PhysicalPropertySet distributeRequirements() { return new PhysicalPropertySet(requirements.getDistributionProperty()); } @Override public Void visitOperator(Operator node, ExpressionContext context) { return null; } @Override public Void visitPhysicalHashJoin(PhysicalHashJoinOperator node, ExpressionContext context) { String hint = node.getJoinHint(); PhysicalPropertySet rightBroadcastProperty = new PhysicalPropertySet(new DistributionProperty(DistributionSpec.createReplicatedDistributionSpec())); LogicalOperator leftChild = (LogicalOperator) context.getChildOperator(0); LogicalOperator rightChild = (LogicalOperator) context.getChildOperator(1); if (leftChild.hasLimit() || rightChild.hasLimit()) { if (leftChild.hasLimit()) { outputInputProps.add(new Pair<>(PhysicalPropertySet.EMPTY, Lists.newArrayList(createLimitGatherProperty(leftChild.getLimit()), rightBroadcastProperty))); } else { outputInputProps.add(new Pair<>(PhysicalPropertySet.EMPTY, Lists.newArrayList(new PhysicalPropertySet(), rightBroadcastProperty))); } return visitJoinRequirements(node, context); } else { outputInputProps.add(new Pair<>(PhysicalPropertySet.EMPTY, Lists.newArrayList(new PhysicalPropertySet(), rightBroadcastProperty))); } ColumnRefSet leftChildColumns = context.getChildOutputColumns(0); ColumnRefSet rightChildColumns = context.getChildOutputColumns(1); List<BinaryPredicateOperator> equalOnPredicate = getEqConj(leftChildColumns, rightChildColumns, Utils.extractConjuncts(node.getJoinPredicate())); if (node.getJoinType().isCrossJoin() || JoinOperator.NULL_AWARE_LEFT_ANTI_JOIN.equals(node.getJoinType()) || (node.getJoinType().isInnerJoin() && equalOnPredicate.isEmpty()) || "BROADCAST".equalsIgnoreCase(hint)) { return visitJoinRequirements(node, context); } if (node.getJoinType().isRightJoin() || node.getJoinType().isFullOuterJoin() || "SHUFFLE".equalsIgnoreCase(hint)) { outputInputProps.clear(); } List<Integer> leftOnPredicateColumns = new ArrayList<>(); List<Integer> rightOnPredicateColumns = new ArrayList<>(); JoinPredicateUtils.getJoinOnPredicatesColumns(equalOnPredicate, leftChildColumns, rightChildColumns, leftOnPredicateColumns, rightOnPredicateColumns); Preconditions.checkState(leftOnPredicateColumns.size() == rightOnPredicateColumns.size()); HashDistributionSpec leftDistribution = DistributionSpec.createHashDistributionSpec( new HashDistributionDesc(leftOnPredicateColumns, HashDistributionDesc.SourceType.SHUFFLE_JOIN)); HashDistributionSpec rightDistribution = DistributionSpec.createHashDistributionSpec( new HashDistributionDesc(rightOnPredicateColumns, HashDistributionDesc.SourceType.SHUFFLE_JOIN)); doHashShuffle(equalOnPredicate, leftDistribution, rightDistribution); if ("SHUFFLE".equalsIgnoreCase(hint)) { return visitJoinRequirements(node, context); } 
if (equalOnPredicate.stream().anyMatch(p -> !isColumnToColumnBinaryPredicate(p))) { return visitJoinRequirements(node, context); } if (!"BUCKET".equalsIgnoreCase(hint)) { tryColocate(leftDistribution, rightDistribution); } tryBucketShuffle(node, leftDistribution, rightDistribution); return visitJoinRequirements(node, context); } private void doHashShuffle(List<BinaryPredicateOperator> equalOnPredicate, HashDistributionSpec leftDistribution, HashDistributionSpec rightDistribution) { if (equalOnPredicate.stream().anyMatch(p -> !isColumnToColumnBinaryPredicate(p))) { PhysicalPropertySet leftProperty = createPropertySetByDistribution(new HashDistributionSpec( new HashDistributionDesc(leftDistribution.getShuffleColumns(), HashDistributionDesc.SourceType.FORCE_SHUFFLE_JOIN))); PhysicalPropertySet rightProperty = createPropertySetByDistribution(new HashDistributionSpec( new HashDistributionDesc(rightDistribution.getShuffleColumns(), HashDistributionDesc.SourceType.FORCE_SHUFFLE_JOIN))); outputInputProps.add(new Pair<>(PhysicalPropertySet.EMPTY, Lists.newArrayList(leftProperty, rightProperty))); return; } PhysicalPropertySet leftInputProperty = createPropertySetByDistribution(leftDistribution); PhysicalPropertySet rightInputProperty = createPropertySetByDistribution(rightDistribution); Optional<HashDistributionDesc> requiredShuffleDesc = getRequiredShuffleJoinDesc(); if (!requiredShuffleDesc.isPresent()) { outputInputProps.add(new Pair<>(PhysicalPropertySet.EMPTY, Lists.newArrayList(leftInputProperty, rightInputProperty))); return; } HashDistributionDesc requiredDesc = requiredShuffleDesc.get(); List<Integer> leftColumns = leftDistribution.getShuffleColumns(); List<Integer> rightColumns = rightDistribution.getShuffleColumns(); List<Integer> requiredColumns = requiredDesc.getColumns(); Preconditions.checkState(leftColumns.size() == rightColumns.size()); boolean checkLeft = leftColumns.containsAll(requiredColumns) && leftColumns.size() == requiredColumns.size(); boolean checkRight = rightColumns.containsAll(requiredColumns) && rightColumns.size() == requiredColumns.size(); if (checkLeft || checkRight) { List<Integer> requiredLeft = Lists.newArrayList(); List<Integer> requiredRight = Lists.newArrayList(); for (Integer cid : requiredColumns) { int idx = checkLeft ? 
leftColumns.indexOf(cid) : rightColumns.indexOf(cid); requiredLeft.add(leftColumns.get(idx)); requiredRight.add(rightColumns.get(idx)); } PhysicalPropertySet leftShuffleProperty = createPropertySetByDistribution( DistributionSpec.createHashDistributionSpec(new HashDistributionDesc(requiredLeft, HashDistributionDesc.SourceType.SHUFFLE_JOIN))); PhysicalPropertySet rightShuffleProperty = createPropertySetByDistribution( DistributionSpec.createHashDistributionSpec(new HashDistributionDesc(requiredRight, HashDistributionDesc.SourceType.SHUFFLE_JOIN))); outputInputProps.add(new Pair<>(distributeRequirements(), Lists.newArrayList(leftShuffleProperty, rightShuffleProperty))); return; } outputInputProps .add(new Pair<>(PhysicalPropertySet.EMPTY, Lists.newArrayList(leftInputProperty, rightInputProperty))); } /* * Colocate will required children support local properties by topdown * All shuffle columns(predicate columns) must come from one table * Can't support case such as (The predicate columns combinations is N*N): * JOIN(s1.A = s3.A AND s2.A = s4.A) * / \ * JOIN JOIN * / \ / \ * s1 s2 s3 s4 * * support case: * JOIN(s1.A = s3.A AND s1.B = s3.B) * / \ * JOIN JOIN * / \ / \ * s1 s2 s3 s4 * * */ private void tryColocate(HashDistributionSpec leftShuffleDistribution, HashDistributionSpec rightShuffleDistribution) { if (Config.disable_colocate_join || ConnectContext.get().getSessionVariable().isDisableColocateJoin()) { return; } Optional<LogicalOlapScanOperator> leftTable = findLogicalOlapScanOperator(leftShuffleDistribution); if (!leftTable.isPresent()) { return; } LogicalOlapScanOperator left = leftTable.get(); Optional<LogicalOlapScanOperator> rightTable = findLogicalOlapScanOperator(rightShuffleDistribution); if (!rightTable.isPresent()) { return; } LogicalOlapScanOperator right = rightTable.get(); ColocateTableIndex colocateIndex = Catalog.getCurrentColocateIndex(); if (left.getTable().getId() == right.getTable().getId() && !colocateIndex.isSameGroup(left.getTable().getId(), right.getTable().getId())) { if (!left.getSelectedPartitionId().equals(right.getSelectedPartitionId()) || left.getSelectedPartitionId().size() > 1) { return; } PhysicalPropertySet rightLocalProperty = createPropertySetByDistribution( createLocalByByHashColumns(rightShuffleDistribution.getShuffleColumns())); PhysicalPropertySet leftLocalProperty = createPropertySetByDistribution( createLocalByByHashColumns(leftShuffleDistribution.getShuffleColumns())); outputInputProps.add(new Pair<>(PhysicalPropertySet.EMPTY, Lists.newArrayList(leftLocalProperty, rightLocalProperty))); } else { if (!colocateIndex.isSameGroup(left.getTable().getId(), right.getTable().getId())) { return; } ColocateTableIndex.GroupId groupId = colocateIndex.getGroup(left.getTable().getId()); if (colocateIndex.isGroupUnstable(groupId)) { return; } HashDistributionSpec leftScanDistribution = left.getDistributionSpec(); HashDistributionSpec rightScanDistribution = right.getDistributionSpec(); Preconditions.checkState(leftScanDistribution.getShuffleColumns().size() == rightScanDistribution.getShuffleColumns().size()); if (!leftShuffleDistribution.getShuffleColumns().containsAll(leftScanDistribution.getShuffleColumns())) { return; } if (!rightShuffleDistribution.getShuffleColumns().containsAll(rightScanDistribution.getShuffleColumns())) { return; } for (int i = 0; i < leftScanDistribution.getShuffleColumns().size(); i++) { int leftScanColumnId = leftScanDistribution.getShuffleColumns().get(i); int leftIndex = 
leftShuffleDistribution.getShuffleColumns().indexOf(leftScanColumnId); int rightScanColumnId = rightScanDistribution.getShuffleColumns().get(i); int rightIndex = rightShuffleDistribution.getShuffleColumns().indexOf(rightScanColumnId); if (leftIndex != rightIndex) { return; } } PhysicalPropertySet rightLocalProperty = createPropertySetByDistribution( createLocalByByHashColumns(rightScanDistribution.getShuffleColumns())); PhysicalPropertySet leftLocalProperty = createPropertySetByDistribution( createLocalByByHashColumns(leftScanDistribution.getShuffleColumns())); outputInputProps.add(new Pair<>(PhysicalPropertySet.EMPTY, Lists.newArrayList(leftLocalProperty, rightLocalProperty))); } } /* * Bucket-shuffle will required left-children support local properties by topdown * All shuffle columns(predicate columns) must come from one table * Can't support case such as (The predicate columns combinations is N*N): * JOIN(s1.A = s3.A AND s2.A = s4.A) * / \ * JOIN JOIN * / \ / \ * s1 s2 s3 s4 * * support case: * JOIN(s1.A = s3.A AND s1.B = s3.B) * / \ * JOIN JOIN * / \ / \ * s1 s2 s3 s4 * * */ private void tryBucketShuffle(PhysicalHashJoinOperator node, HashDistributionSpec leftShuffleDistribution, HashDistributionSpec rightShuffleDistribution) { JoinOperator nodeJoinType = node.getJoinType(); if (nodeJoinType.isCrossJoin()) { return; } Optional<LogicalOlapScanOperator> leftTable = findLogicalOlapScanOperator(leftShuffleDistribution); if (!leftTable.isPresent()) { return; } LogicalOlapScanOperator left = leftTable.get(); if (left.getSelectedPartitionId().size() != 1) { return; } HashDistributionSpec leftScanDistribution = left.getDistributionSpec(); if (!leftShuffleDistribution.getShuffleColumns().containsAll(leftScanDistribution.getShuffleColumns())) { return; } List<Integer> rightBucketShuffleColumns = Lists.newArrayList(); for (int leftScanColumn : leftScanDistribution.getShuffleColumns()) { int index = leftShuffleDistribution.getShuffleColumns().indexOf(leftScanColumn); rightBucketShuffleColumns.add(rightShuffleDistribution.getShuffleColumns().get(index)); } List<Integer> leftLocalColumns = Lists.newArrayList(leftScanDistribution.getShuffleColumns()); PhysicalPropertySet rightBucketShuffleProperty = createPropertySetByDistribution( DistributionSpec.createHashDistributionSpec(new HashDistributionDesc(rightBucketShuffleColumns, HashDistributionDesc.SourceType.BUCKET_JOIN))); PhysicalPropertySet leftLocalProperty = createPropertySetByDistribution(createLocalByByHashColumns(leftLocalColumns)); outputInputProps.add(new Pair<>(PhysicalPropertySet.EMPTY, Lists.newArrayList(leftLocalProperty, rightBucketShuffleProperty))); } private Optional<LogicalOlapScanOperator> findLogicalOlapScanOperator(HashDistributionSpec distributionSpec) { /* * All shuffle columns must come from one table * */ List<ColumnRefOperator> shuffleColumns = distributionSpec.getShuffleColumns().stream().map(d -> context.getColumnRefFactory().getColumnRef(d)) .collect(Collectors.toList()); for (LogicalOlapScanOperator scanOperator : taskContext.getAllScanOperators()) { if (scanOperator.getOutputColumns().containsAll(shuffleColumns)) { return Optional.of(scanOperator); } } return Optional.empty(); } private HashDistributionSpec createLocalByByHashColumns(List<Integer> hashColumns) { HashDistributionDesc hashDesc = new HashDistributionDesc(hashColumns, HashDistributionDesc.SourceType.LOCAL); return DistributionSpec.createHashDistributionSpec(hashDesc); } private Optional<HashDistributionDesc> getRequiredLocalDesc() { if 
(!requirements.getDistributionProperty().isShuffle()) { return Optional.empty(); } HashDistributionDesc requireDistributionDesc = ((HashDistributionSpec) requirements.getDistributionProperty().getSpec()).getHashDistributionDesc(); if (!HashDistributionDesc.SourceType.LOCAL.equals(requireDistributionDesc.getSourceType())) { return Optional.empty(); } return Optional.of(requireDistributionDesc); } private Optional<GatherDistributionSpec> getRequiredGatherDesc() { if (!requirements.getDistributionProperty().isGather()) { return Optional.empty(); } GatherDistributionSpec requireDistributionDesc = ((GatherDistributionSpec) requirements.getDistributionProperty().getSpec()); return Optional.of(requireDistributionDesc); } private Optional<HashDistributionDesc> getRequiredShuffleJoinDesc() { if (!requirements.getDistributionProperty().isShuffle()) { return Optional.empty(); } HashDistributionDesc requireDistributionDesc = ((HashDistributionSpec) requirements.getDistributionProperty().getSpec()).getHashDistributionDesc(); if (!HashDistributionDesc.SourceType.SHUFFLE_JOIN.equals(requireDistributionDesc.getSourceType())) { return Optional.empty(); } return Optional.of(requireDistributionDesc); } private Void visitJoinRequirements(PhysicalHashJoinOperator node, ExpressionContext context) { Optional<GatherDistributionSpec> requiredGatherDistribution = getRequiredGatherDesc(); if (requiredGatherDistribution.isPresent()) { return tryGatherForBroadcastJoin(node, context); } Optional<HashDistributionDesc> required = getRequiredLocalDesc(); if (!required.isPresent()) { return visitOperator(node, context); } HashDistributionDesc requireDistributionDesc = required.get(); ColumnRefSet requiredLocalColumns = new ColumnRefSet(); requireDistributionDesc.getColumns().forEach(requiredLocalColumns::union); ColumnRefSet leftChildColumns = context.getChildOutputColumns(0); ColumnRefSet rightChildColumns = context.getChildOutputColumns(1); boolean requiredLocalColumnsFromLeft = leftChildColumns.contains(requiredLocalColumns); boolean requiredLocalColumnsFromRight = rightChildColumns.contains(requiredLocalColumns); if (requiredLocalColumnsFromLeft == requiredLocalColumnsFromRight) { outputInputProps.clear(); return visitOperator(node, context); } List<Pair<PhysicalPropertySet, List<PhysicalPropertySet>>> result = Lists.newArrayList(); if (requiredLocalColumnsFromLeft) { for (Pair<PhysicalPropertySet, List<PhysicalPropertySet>> outputInputProp : outputInputProps) { PhysicalPropertySet left = outputInputProp.second.get(0); PhysicalPropertySet right = outputInputProp.second.get(1); if (left.getDistributionProperty().isAny()) { result.add( new Pair<>(distributeRequirements(), Lists.newArrayList(distributeRequirements(), right))); } else if (left.getDistributionProperty().isShuffle()) { HashDistributionDesc desc = ((HashDistributionSpec) left.getDistributionProperty().getSpec()).getHashDistributionDesc(); if (desc.getSourceType() == HashDistributionDesc.SourceType.LOCAL && requireDistributionDesc.getColumns().containsAll(desc.getColumns())) { result.add(new Pair<>(distributeRequirements(), outputInputProp.second)); } } } } else { for (Pair<PhysicalPropertySet, List<PhysicalPropertySet>> outputInputProp : outputInputProps) { PhysicalPropertySet right = outputInputProp.second.get(1); if (right.getDistributionProperty().isShuffle()) { HashDistributionDesc desc = ((HashDistributionSpec) right.getDistributionProperty().getSpec()) .getHashDistributionDesc(); if (desc.getSourceType() == HashDistributionDesc.SourceType.LOCAL && 
requireDistributionDesc.getColumns().containsAll(desc.getColumns())) { result.add(new Pair<>(distributeRequirements(), outputInputProp.second)); } } } } outputInputProps = result; return visitOperator(node, context); } @Override public Void visitPhysicalProject(PhysicalProjectOperator node, ExpressionContext context) { outputInputProps.add(new Pair<>(distributeRequirements(), Lists.newArrayList(distributeRequirements()))); if (getRequiredLocalDesc().isPresent()) { return visitOperator(node, context); } outputInputProps.add(new Pair<>(PhysicalPropertySet.EMPTY, Lists.newArrayList(PhysicalPropertySet.EMPTY))); return visitOperator(node, context); } @Override public Void visitPhysicalHashAggregate(PhysicalHashAggregateOperator node, ExpressionContext context) { if (ConnectContext.get().getSessionVariable().getNewPlannerAggStage() == 0 && context.getRootProperty().isExecuteInOneTablet() && node.getType().isGlobal() && !node.isSplit()) { outputInputProps.add(new Pair<>(PhysicalPropertySet.EMPTY, Lists.newArrayList(PhysicalPropertySet.EMPTY))); return visitAggregateRequirements(node, context); } LogicalOperator child = (LogicalOperator) context.getChildOperator(0); if (child.hasLimit() && (node.getType().isGlobal() && !node.isSplit())) { outputInputProps.add(new Pair<>(PhysicalPropertySet.EMPTY, Lists.newArrayList(createLimitGatherProperty(child.getLimit())))); return visitAggregateRequirements(node, context); } if (!node.getType().isLocal()) { List<Integer> columns = node.getPartitionByColumns().stream().map(ColumnRefOperator::getId).collect( Collectors.toList()); if (columns.isEmpty()) { DistributionProperty distributionProperty = new DistributionProperty(DistributionSpec.createGatherDistributionSpec()); outputInputProps.add(new Pair<>(new PhysicalPropertySet(distributionProperty), Lists.newArrayList(new PhysicalPropertySet(distributionProperty)))); return visitAggregateRequirements(node, context); } DistributionSpec distributionSpec = DistributionSpec.createHashDistributionSpec( new HashDistributionDesc(columns, HashDistributionDesc.SourceType.SHUFFLE_AGG)); DistributionProperty distributionProperty = new DistributionProperty(distributionSpec); outputInputProps.add(new Pair<>(new PhysicalPropertySet(distributionProperty), Lists.newArrayList(new PhysicalPropertySet(distributionProperty)))); DistributionSpec localSpec = DistributionSpec.createHashDistributionSpec( new HashDistributionDesc(columns, HashDistributionDesc.SourceType.LOCAL)); DistributionProperty localProperty = new DistributionProperty(localSpec); outputInputProps.add(new Pair<>(new PhysicalPropertySet(localProperty), Lists.newArrayList(new PhysicalPropertySet(localProperty)))); return visitAggregateRequirements(node, context); } outputInputProps.add(new Pair<>(PhysicalPropertySet.EMPTY, Lists.newArrayList(PhysicalPropertySet.EMPTY))); return visitAggregateRequirements(node, context); } private Void visitAggregateRequirements(PhysicalHashAggregateOperator node, ExpressionContext context) { Optional<HashDistributionDesc> required = getRequiredLocalDesc(); if (!required.isPresent()) { return visitOperator(node, context); } HashDistributionDesc requireDistributionDesc = required.get(); ColumnRefSet requiredLocalColumns = new ColumnRefSet(); requireDistributionDesc.getColumns().forEach(requiredLocalColumns::union); List<Pair<PhysicalPropertySet, List<PhysicalPropertySet>>> result = Lists.newArrayList(); for (Pair<PhysicalPropertySet, List<PhysicalPropertySet>> outputInputProp : outputInputProps) { PhysicalPropertySet input = 
outputInputProp.second.get(0); if (input.getDistributionProperty().isAny()) { result.add(new Pair<>(distributeRequirements(), Lists.newArrayList(distributeRequirements()))); } else if (input.getDistributionProperty().isShuffle()) { HashDistributionDesc outputDesc = ((HashDistributionSpec) outputInputProp.first.getDistributionProperty().getSpec()) .getHashDistributionDesc(); HashDistributionDesc inputDesc = ((HashDistributionSpec) input.getDistributionProperty().getSpec()).getHashDistributionDesc(); if (outputDesc.getSourceType() == HashDistributionDesc.SourceType.LOCAL && inputDesc.getSourceType() == HashDistributionDesc.SourceType.LOCAL && requireDistributionDesc.getColumns().containsAll(inputDesc.getColumns())) { result.add(new Pair<>(distributeRequirements(), outputInputProp.second)); } } } outputInputProps = result; return visitOperator(node, context); } @Override public Void visitPhysicalOlapScan(PhysicalOlapScanOperator node, ExpressionContext context) { HashDistributionSpec hashDistributionSpec = node.getDistributionSpec(); ColocateTableIndex colocateIndex = Catalog.getCurrentColocateIndex(); if (node.getSelectedPartitionId().size() > 1 && !colocateIndex.isColocateTable(node.getTable().getId())) { outputInputProps.add(new Pair<>(PhysicalPropertySet.EMPTY, Lists.newArrayList())); } else { outputInputProps .add(new Pair<>(createPropertySetByDistribution(hashDistributionSpec), Lists.newArrayList())); } Optional<HashDistributionDesc> required = getRequiredLocalDesc(); if (!required.isPresent()) { return visitOperator(node, context); } outputInputProps.clear(); HashDistributionDesc requireDistributionDesc = required.get(); if (requireDistributionDesc.getColumns().containsAll(hashDistributionSpec.getShuffleColumns())) { outputInputProps.add(new Pair<>(distributeRequirements(), Lists.newArrayList())); } return visitOperator(node, context); } @Override public Void visitPhysicalTopN(PhysicalTopNOperator topN, ExpressionContext context) { if (getRequiredLocalDesc().isPresent()) { return visitOperator(topN, context); } PhysicalPropertySet outputProperty; if (topN.getSortPhase().isFinal()) { if (topN.isSplit()) { DistributionSpec distributionSpec = DistributionSpec.createGatherDistributionSpec(); DistributionProperty distributionProperty = new DistributionProperty(distributionSpec); SortProperty sortProperty = new SortProperty(topN.getOrderSpec()); outputProperty = new PhysicalPropertySet(distributionProperty, sortProperty); } else { outputProperty = new PhysicalPropertySet(new SortProperty(topN.getOrderSpec())); } } else { outputProperty = new PhysicalPropertySet(); } LogicalOperator child = (LogicalOperator) context.getChildOperator(0); if (child.hasLimit() && (topN.getSortPhase().isFinal() && !topN.isSplit())) { PhysicalPropertySet inputProperty = createLimitGatherProperty(child.getLimit()); outputInputProps.add(new Pair<>(outputProperty, Lists.newArrayList(inputProperty))); } else { outputInputProps.add(new Pair<>(outputProperty, Lists.newArrayList(PhysicalPropertySet.EMPTY))); } return visitOperator(topN, context); } @Override public Void visitPhysicalHiveScan(PhysicalHiveScanOperator node, ExpressionContext context) { if (getRequiredLocalDesc().isPresent()) { return visitOperator(node, context); } outputInputProps.add(new Pair<>(PhysicalPropertySet.EMPTY, Lists.newArrayList())); return visitOperator(node, context); } @Override public Void visitPhysicalSchemaScan(PhysicalSchemaScanOperator node, ExpressionContext context) { if (getRequiredLocalDesc().isPresent()) { return 
visitOperator(node, context); } outputInputProps.add(new Pair<>(PhysicalPropertySet.EMPTY, Lists.newArrayList())); return visitOperator(node, context); } @Override public Void visitPhysicalMysqlScan(PhysicalMysqlScanOperator node, ExpressionContext context) { if (getRequiredLocalDesc().isPresent()) { return visitOperator(node, context); } outputInputProps.add(new Pair<>(PhysicalPropertySet.EMPTY, Lists.newArrayList())); return visitOperator(node, context); } @Override public Void visitPhysicalEsScan(PhysicalEsScanOperator node, ExpressionContext context) { if (getRequiredLocalDesc().isPresent()) { return visitOperator(node, context); } outputInputProps.add(new Pair<>(PhysicalPropertySet.EMPTY, Lists.newArrayList())); return visitOperator(node, context); } @Override public Void visitPhysicalAssertOneRow(PhysicalAssertOneRowOperator node, ExpressionContext context) { if (getRequiredLocalDesc().isPresent()) { return visitOperator(node, context); } DistributionSpec gather = DistributionSpec.createGatherDistributionSpec(); DistributionProperty inputProperty = new DistributionProperty(gather); outputInputProps.add(new Pair<>(PhysicalPropertySet.EMPTY, Lists.newArrayList(new PhysicalPropertySet(inputProperty)))); return visitOperator(node, context); } @Override public Void visitPhysicalAnalytic(PhysicalWindowOperator node, ExpressionContext context) { List<Integer> partitionColumnRefSet = new ArrayList<>(); List<Ordering> orderings = new ArrayList<>(); node.getPartitionExpressions().forEach(e -> { partitionColumnRefSet .addAll(Arrays.stream(e.getUsedColumns().getColumnIds()).boxed().collect(Collectors.toList())); orderings.add(new Ordering((ColumnRefOperator) e, true, true)); }); node.getOrderByElements().forEach(o -> { if (orderings.stream().noneMatch(ordering -> ordering.getColumnRef().equals(o.getColumnRef()))) { orderings.add(o); } }); SortProperty sortProperty = new SortProperty(new OrderSpec(orderings)); Optional<HashDistributionDesc> required = getRequiredLocalDesc(); if (required.isPresent()) { if (!partitionColumnRefSet.isEmpty() && required.get().getColumns().containsAll(partitionColumnRefSet)) { DistributionProperty localProperty = new DistributionProperty(DistributionSpec .createHashDistributionSpec(new HashDistributionDesc(partitionColumnRefSet, HashDistributionDesc.SourceType.LOCAL))); outputInputProps.add(new Pair<>(new PhysicalPropertySet(localProperty, sortProperty), Lists.newArrayList(new PhysicalPropertySet(localProperty, sortProperty)))); } return visitOperator(node, context); } if (partitionColumnRefSet.isEmpty()) { DistributionProperty distributionProperty = new DistributionProperty(DistributionSpec.createGatherDistributionSpec()); outputInputProps.add(new Pair<>(new PhysicalPropertySet(distributionProperty, sortProperty), Lists.newArrayList(new PhysicalPropertySet(distributionProperty, sortProperty)))); } else { DistributionProperty distributionProperty = new DistributionProperty(DistributionSpec .createHashDistributionSpec( new HashDistributionDesc(partitionColumnRefSet, HashDistributionDesc.SourceType.SHUFFLE_AGG))); outputInputProps.add(new Pair<>(new PhysicalPropertySet(distributionProperty, sortProperty), Lists.newArrayList(new PhysicalPropertySet(distributionProperty, sortProperty)))); DistributionProperty localProperty = new DistributionProperty(DistributionSpec.createHashDistributionSpec( new HashDistributionDesc(partitionColumnRefSet, HashDistributionDesc.SourceType.LOCAL))); outputInputProps.add(new Pair<>(new PhysicalPropertySet(localProperty, sortProperty), 
Lists.newArrayList(new PhysicalPropertySet(localProperty, sortProperty)))); } return visitOperator(node, context); } @Override public Void visitPhysicalUnion(PhysicalUnionOperator node, ExpressionContext context) { processSetOperationChildProperty(context); return visitOperator(node, context); } @Override public Void visitPhysicalExcept(PhysicalExceptOperator node, ExpressionContext context) { processSetOperationChildProperty(context); return visitOperator(node, context); } @Override public Void visitPhysicalIntersect(PhysicalIntersectOperator node, ExpressionContext context) { processSetOperationChildProperty(context); return visitOperator(node, context); } private void processSetOperationChildProperty(ExpressionContext context) { if (getRequiredLocalDesc().isPresent()) { return; } List<PhysicalPropertySet> childProperty = new ArrayList<>(); for (int i = 0; i < context.arity(); ++i) { LogicalOperator child = (LogicalOperator) context.getChildOperator(i); if (child.hasLimit()) { childProperty.add(createLimitGatherProperty(child.getLimit())); } else { childProperty.add(PhysicalPropertySet.EMPTY); } } outputInputProps.add(new Pair<>(PhysicalPropertySet.EMPTY, childProperty)); } @Override public Void visitPhysicalValues(PhysicalValuesOperator node, ExpressionContext context) { outputInputProps.add(new Pair<>(distributeRequirements(), Lists.newArrayList())); return visitOperator(node, context); } @Override public Void visitPhysicalRepeat(PhysicalRepeatOperator node, ExpressionContext context) { if (getRequiredLocalDesc().isPresent()) { outputInputProps.add(new Pair<>(distributeRequirements(), Lists.newArrayList(distributeRequirements()))); return visitOperator(node, context); } outputInputProps.add(new Pair<>(PhysicalPropertySet.EMPTY, Lists.newArrayList(PhysicalPropertySet.EMPTY))); return visitOperator(node, context); } @Override public Void visitPhysicalFilter(PhysicalFilterOperator node, ExpressionContext context) { if (getRequiredLocalDesc().isPresent()) { outputInputProps.add(new Pair<>(distributeRequirements(), Lists.newArrayList(distributeRequirements()))); return visitOperator(node, context); } outputInputProps.add(new Pair<>(PhysicalPropertySet.EMPTY, Lists.newArrayList(PhysicalPropertySet.EMPTY))); return visitOperator(node, context); } @Override public Void visitPhysicalTableFunction(PhysicalTableFunctionOperator node, ExpressionContext context) { if (getRequiredLocalDesc().isPresent()) { outputInputProps.add(new Pair<>(distributeRequirements(), Lists.newArrayList(distributeRequirements()))); return visitOperator(node, context); } outputInputProps.add(new Pair<>(PhysicalPropertySet.EMPTY, Lists.newArrayList(PhysicalPropertySet.EMPTY))); return visitOperator(node, context); } private PhysicalPropertySet createLimitGatherProperty(long limit) { DistributionSpec distributionSpec = DistributionSpec.createGatherDistributionSpec(limit); DistributionProperty distributionProperty = new DistributionProperty(distributionSpec); return new PhysicalPropertySet(distributionProperty, SortProperty.EMPTY); } private PhysicalPropertySet createPropertySetByDistribution(DistributionSpec distributionSpec) { DistributionProperty distributionProperty = new DistributionProperty(distributionSpec); return new PhysicalPropertySet(distributionProperty); } }
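tryBucketShuffle above derives the right child's shuffle columns by looking up each left bucketing column's position among the left join columns and taking the right join column at that position, so the right side's rows land in the left table's existing buckets. A runnable sketch of that mapping, with hypothetical names:

import java.util.ArrayList;
import java.util.List;

public class BucketShuffleMappingSketch {
    // For each bucketing column of the left scan, find its slot in the left
    // join-shuffle columns and pick the right join-shuffle column in the
    // same slot.
    static List<Integer> rightBucketShuffleColumns(List<Integer> leftScanColumns,
                                                   List<Integer> leftShuffleColumns,
                                                   List<Integer> rightShuffleColumns) {
        List<Integer> result = new ArrayList<>();
        for (int leftScanColumn : leftScanColumns) {
            int index = leftShuffleColumns.indexOf(leftScanColumn);
            result.add(rightShuffleColumns.get(index));
        }
        return result;
    }

    public static void main(String[] args) {
        // Left table bucketed on column 2; predicate columns are (1, 2) on
        // the left and (11, 12) on the right, so the right child must be
        // shuffled by column 12.
        System.out.println(rightBucketShuffleColumns(
                List.of(2), List.of(1, 2), List.of(11, 12)));   // prints [12]
    }
}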
Failed to drop catalog xxxxx
public ShowResultSet visitDropCatalogStatement(DropCatalogStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { String catalogName = stmt.getName(); if (!context.getGlobalStateMgr().getCatalogMgr().catalogExists(catalogName)) { if (stmt.isIfExists()) { LOG.info("drop catalog[{}] which does not exist", catalogName); return; } else { ErrorReport.reportDdlException(ErrorCode.ERR_UNKNOWN_CATALOG, catalogName); } } context.getGlobalStateMgr().getCatalogMgr().dropCatalog(stmt); }); return null; }
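The method above, like every visitor in this class, runs its body through ErrorReport.wrapWithRuntimeException so that checked DDL failures surface as unchecked exceptions and the visitor keeps a uniform ShowResultSet signature. A minimal sketch of that wrapper shape — the helper and interface here are assumptions for illustration, not the repository's actual ErrorReport API:

public class WrapExceptionSketch {
    interface ThrowingRunnable {
        void run() throws Exception;
    }

    // Run a body that may throw a checked exception; rethrow unchecked so
    // callers do not need a throws clause.
    static void wrapWithRuntimeException(ThrowingRunnable body) {
        try {
            body.run();
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

    public static void main(String[] args) {
        wrapWithRuntimeException(() -> System.out.println("ok"));
        try {
            wrapWithRuntimeException(() -> {
                throw new Exception("ddl failed");
            });
        } catch (RuntimeException e) {
            System.out.println("rethrown: " + e.getCause().getMessage());
        }
    }
}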
LOG.info("drop catalog[{}] which does not exist", catalogName);
public ShowResultSet visitDropCatalogStatement(DropCatalogStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { String catalogName = stmt.getName(); if (!context.getGlobalStateMgr().getCatalogMgr().catalogExists(catalogName)) { if (stmt.isIfExists()) { LOG.info("drop catalog[{}] which does not exist", catalogName); return; } else { ErrorReport.reportDdlException(ErrorCode.ERR_BAD_CATALOG_ERROR, catalogName); } } context.getGlobalStateMgr().getCatalogMgr().dropCatalog(stmt); }); return null; }
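The corrected method keeps the usual DROP ... IF EXISTS contract: a missing catalog is a logged no-op when IF EXISTS is given, and an error (now ERR_BAD_CATALOG_ERROR rather than the previous code) otherwise. A self-contained sketch of that contract, using a hypothetical set-backed catalog registry:

import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class DropIfExistsSketch {
    // Missing object: log-and-return when ifExists is set, error otherwise.
    static void dropCatalog(Set<String> catalogs, String name, boolean ifExists) {
        if (!catalogs.contains(name)) {
            if (ifExists) {
                System.out.println("drop catalog[" + name + "] which does not exist");
                return;
            }
            throw new IllegalStateException("Unknown catalog '" + name + "'");
        }
        catalogs.remove(name);
    }

    public static void main(String[] args) {
        Set<String> catalogs = new HashSet<>(List.of("hive"));
        dropCatalog(catalogs, "iceberg", true);   // no-op, just logs
        dropCatalog(catalogs, "hive", false);     // actually removes it
        System.out.println(catalogs);             // prints []
    }
}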
class StmtExecutorVisitor implements AstVisitor<ShowResultSet, ConnectContext> { private static final Logger LOG = LogManager.getLogger(StmtExecutorVisitor.class); private static final StmtExecutorVisitor INSTANCE = new StmtExecutorVisitor(); public static StmtExecutorVisitor getInstance() { return INSTANCE; } protected StmtExecutorVisitor() { } @Override public ShowResultSet visitNode(ParseNode node, ConnectContext context) { throw new RuntimeException(new DdlException("unsupported statement: " + node.toSql())); } @Override public ShowResultSet visitCreateDbStatement(CreateDbStmt stmt, ConnectContext context) { String fullDbName = stmt.getFullDbName(); String catalogName = stmt.getCatalogName(); Map<String, String> properties = stmt.getProperties(); boolean isSetIfNotExists = stmt.isSetIfNotExists(); ErrorReport.wrapWithRuntimeException(() -> { try { context.getGlobalStateMgr().getMetadataMgr().createDb(catalogName, fullDbName, properties); } catch (AlreadyExistsException e) { if (isSetIfNotExists) { LOG.info("create database[{}] which already exists", fullDbName); } else { ErrorReport.reportDdlException(ErrorCode.ERR_DB_CREATE_EXISTS, fullDbName); } } }); return null; } @Override public ShowResultSet visitDropDbStatement(DropDbStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { String catalogName = stmt.getCatalogName(); String dbName = stmt.getDbName(); boolean isForceDrop = stmt.isForceDrop(); try { context.getGlobalStateMgr().getMetadataMgr().dropDb(catalogName, dbName, isForceDrop); } catch (MetaNotFoundException e) { if (stmt.isSetIfExists()) { LOG.info("drop database[{}] which does not exist", dbName); } else { ErrorReport.reportDdlException(ErrorCode.ERR_DB_DROP_EXISTS, dbName); } } }); return null; } @Override public ShowResultSet visitCreateFunctionStatement(CreateFunctionStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { FunctionName name = stmt.getFunctionName(); if (name.isGlobalFunction()) { context.getGlobalStateMgr() .getGlobalFunctionMgr() .userAddFunction(stmt.getFunction(), stmt.shouldReplaceIfExists()); } else { Database db = context.getGlobalStateMgr().getDb(name.getDb()); if (db == null) { ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, name.getDb()); } db.addFunction(stmt.getFunction(), stmt.shouldReplaceIfExists()); } }); return null; } @Override public ShowResultSet visitDropFunctionStatement(DropFunctionStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { FunctionName name = stmt.getFunctionName(); if (name.isGlobalFunction()) { context.getGlobalStateMgr().getGlobalFunctionMgr().userDropFunction(stmt.getFunctionSearchDesc()); } else { Database db = context.getGlobalStateMgr().getDb(name.getDb()); if (db == null) { ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, name.getDb()); } db.dropFunction(stmt.getFunctionSearchDesc()); } }); return null; } @Override public ShowResultSet visitCreateTableStatement(CreateTableStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getMetadataMgr().createTable(stmt); }); return null; } @Override public ShowResultSet visitCreateTemporaryTableStatement(CreateTemporaryTableStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getMetadataMgr().createTemporaryTable(stmt); }); return null; } @Override public ShowResultSet visitCreateTableLikeStatement(CreateTableLikeStmt stmt, ConnectContext context) { 
ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getMetadataMgr().createTableLike(stmt); }); return null; } @Override public ShowResultSet visitCreateTemporaryTableLikeStatement( CreateTemporaryTableLikeStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getMetadataMgr() .createTemporaryTable((CreateTemporaryTableStmt) stmt.getCreateTableStmt()); }); return null; } @Override public ShowResultSet visitDropTableStatement(DropTableStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { if (stmt.getTemporaryTableMark()) { DropTemporaryTableStmt dropTemporaryTableStmt = new DropTemporaryTableStmt( stmt.isSetIfExists(), stmt.getTbl(), stmt.isForceDrop()); dropTemporaryTableStmt.setSessionId(context.getSessionId()); context.getGlobalStateMgr().getMetadataMgr().dropTemporaryTable(dropTemporaryTableStmt); } else { context.getGlobalStateMgr().getMetadataMgr().dropTable(stmt); } }); return null; } @Override public ShowResultSet visitDropTemporaryTableStatement(DropTemporaryTableStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getMetadataMgr().dropTemporaryTable(stmt); }); return null; } @Override public ShowResultSet visitCleanTemporaryTableStatement(CleanTemporaryTableStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getMetadataMgr().cleanTemporaryTables(stmt); }); return null; } @Override public ShowResultSet visitCreateMaterializedViewStmt(CreateMaterializedViewStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getLocalMetastore().createMaterializedView(stmt); }); return null; } @Override public ShowResultSet visitCreateMaterializedViewStatement(CreateMaterializedViewStatement stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getLocalMetastore().createMaterializedView(stmt); }); return null; } @Override public ShowResultSet visitDropMaterializedViewStatement(DropMaterializedViewStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getLocalMetastore().dropMaterializedView(stmt); }); return null; } @Override public ShowResultSet visitAlterMaterializedViewStatement(AlterMaterializedViewStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getLocalMetastore().alterMaterializedView(stmt); }); return null; } @Override public ShowResultSet visitRefreshMaterializedViewStatement(RefreshMaterializedViewStatement stmt, ConnectContext context) { List<String> info = Lists.newArrayList(); ErrorReport.wrapWithRuntimeException(() -> { String taskRunId = context.getGlobalStateMgr().getLocalMetastore().refreshMaterializedView(stmt); info.add(taskRunId); }); return new ShowResultSet(RefreshMaterializedViewStatement.META_DATA, Arrays.asList(info)); } @Override public ShowResultSet visitCancelRefreshMaterializedViewStatement(CancelRefreshMaterializedViewStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getLocalMetastore() .cancelRefreshMaterializedView( stmt.getMvName().getDb(), stmt.getMvName().getTbl()); }); return null; } @Override public ShowResultSet visitAlterTableStatement(AlterTableStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { 
context.getGlobalStateMgr().getMetadataMgr().alterTable(stmt); }); return null; } @Override public ShowResultSet visitAlterViewStatement(AlterViewStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getLocalMetastore().alterView(stmt); }); return null; } @Override public ShowResultSet visitCancelAlterTableStatement(CancelAlterTableStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getLocalMetastore().cancelAlter(stmt); }); return null; } @Override public ShowResultSet visitLoadStatement(LoadStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { EtlJobType jobType = stmt.getEtlJobType(); if (jobType == EtlJobType.UNKNOWN) { throw new DdlException("Unknown load job type"); } if (jobType == EtlJobType.HADOOP && Config.disable_hadoop_load) { throw new DdlException("Load job by hadoop cluster is disabled." + " Try using broker load. See 'help broker load;'"); } context.getGlobalStateMgr().getLoadMgr().createLoadJobFromStmt(stmt, context); }); return null; } @Override public ShowResultSet visitCancelLoadStatement(CancelLoadStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getLoadMgr().cancelLoadJob(stmt); }); return null; } @Override public ShowResultSet visitCancelCompactionStatement(CancelCompactionStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getCompactionMgr().cancelCompaction(stmt.getTxnId()); }); return null; } @Override public ShowResultSet visitCreateRoutineLoadStatement(CreateRoutineLoadStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getRoutineLoadMgr().createRoutineLoadJob(stmt); }); return null; } @Override public ShowResultSet visitPauseRoutineLoadStatement(PauseRoutineLoadStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getRoutineLoadMgr().pauseRoutineLoadJob(stmt); }); return null; } @Override public ShowResultSet visitResumeRoutineLoadStatement(ResumeRoutineLoadStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getRoutineLoadMgr().resumeRoutineLoadJob(stmt); }); return null; } @Override public ShowResultSet visitStopRoutineLoadStatement(StopRoutineLoadStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getRoutineLoadMgr().stopRoutineLoadJob(stmt); }); return null; } @Override public ShowResultSet visitAlterRoutineLoadStatement(AlterRoutineLoadStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getRoutineLoadMgr().alterRoutineLoadJob(stmt); }); return null; } @Override public ShowResultSet visitAlterLoadStatement(AlterLoadStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getLoadMgr().alterLoadJob(stmt); }); return null; } @Override public ShowResultSet visitCreateUserStatement(CreateUserStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getAuthenticationMgr().createUser(stmt); }); return null; } @Override public ShowResultSet visitAlterUserStatement(AlterUserStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getAuthenticationMgr() 
.alterUser(stmt.getUserIdentity(), stmt.getAuthenticationInfo()); }); return null; } @Override public ShowResultSet visitDropUserStatement(DropUserStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getAuthenticationMgr().dropUser(stmt); }); return null; } @Override public ShowResultSet visitGrantRevokeRoleStatement(BaseGrantRevokeRoleStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { if (stmt instanceof GrantRoleStmt) { context.getGlobalStateMgr().getAuthorizationMgr().grantRole((GrantRoleStmt) stmt); } else { context.getGlobalStateMgr().getAuthorizationMgr().revokeRole((RevokeRoleStmt) stmt); } }); return null; } @Override public ShowResultSet visitGrantRevokePrivilegeStatement(BaseGrantRevokePrivilegeStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { if (stmt instanceof GrantPrivilegeStmt) { context.getGlobalStateMgr().getAuthorizationMgr().grant((GrantPrivilegeStmt) stmt); } else { context.getGlobalStateMgr().getAuthorizationMgr().revoke((RevokePrivilegeStmt) stmt); } }); return null; } @Override public ShowResultSet visitCreateRoleStatement(CreateRoleStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getAuthorizationMgr().createRole(stmt); }); return null; } @Override public ShowResultSet visitAlterRoleStatement(AlterRoleStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getAuthorizationMgr().alterRole(stmt); }); return null; } @Override public ShowResultSet visitDropRoleStatement(DropRoleStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getAuthorizationMgr().dropRole(stmt); }); return null; } @Override public ShowResultSet visitSetUserPropertyStatement(SetUserPropertyStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getAuthenticationMgr().updateUserProperty(stmt.getUser(), stmt.getPropertyPairList()); }); return null; } @Override public ShowResultSet visitAlterSystemStatement(AlterSystemStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getAlterJobMgr().processAlterCluster(stmt); }); return null; } @Override public ShowResultSet visitCancelAlterSystemStatement(CancelAlterSystemStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getAlterJobMgr().getClusterHandler().cancel(stmt); }); return null; } @Override public ShowResultSet visitAlterDatabaseQuotaStatement(AlterDatabaseQuotaStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getLocalMetastore().alterDatabaseQuota(stmt); }); return null; } @Override public ShowResultSet visitAlterDatabaseRenameStatement(AlterDatabaseRenameStatement stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getLocalMetastore().renameDatabase(stmt); }); return null; } @Override public ShowResultSet visitRecoverDbStatement(RecoverDbStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getLocalMetastore().recoverDatabase(stmt); }); return null; } @Override public ShowResultSet visitRecoverTableStatement(RecoverTableStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { 
context.getGlobalStateMgr().getLocalMetastore().recoverTable(stmt); }); return null; } @Override public ShowResultSet visitRecoverPartitionStatement(RecoverPartitionStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getLocalMetastore().recoverPartition(stmt); }); return null; } @Override public ShowResultSet visitCreateViewStatement(CreateViewStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { GlobalStateMgr.getCurrentState().getMetadataMgr().createView(stmt); }); return null; } @Override public ShowResultSet visitBackupStatement(BackupStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getBackupHandler().process(stmt); }); return null; } @Override public ShowResultSet visitRestoreStatement(RestoreStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getBackupHandler().process(stmt); }); return null; } @Override public ShowResultSet visitCancelBackupStatement(CancelBackupStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getBackupHandler().cancel(stmt); }); return null; } @Override public ShowResultSet visitCreateRepositoryStatement(CreateRepositoryStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getBackupHandler().createRepository(stmt); }); return null; } @Override public ShowResultSet visitDropRepositoryStatement(DropRepositoryStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getBackupHandler().dropRepository(stmt); }); return null; } @Override public ShowResultSet visitSyncStatement(SyncStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { }); return null; } @Override public ShowResultSet visitTruncateTableStatement(TruncateTableStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getLocalMetastore().truncateTable(stmt, context); }); return null; } @Override public ShowResultSet visitAdminRepairTableStatement(AdminRepairTableStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getTabletChecker().repairTable(stmt); }); return null; } @Override public ShowResultSet visitAdminCancelRepairTableStatement(AdminCancelRepairTableStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getTabletChecker().cancelRepairTable(stmt); }); return null; } @Override public ShowResultSet visitAdminSetConfigStatement(AdminSetConfigStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getNodeMgr().setConfig(stmt); if (stmt.getConfig().containsKey("mysql_server_version")) { String version = stmt.getConfig().getMap().get("mysql_server_version"); if (!Strings.isNullOrEmpty(version)) { GlobalVariable.version = version; } } }); return null; } @Override public ShowResultSet visitCreateFileStatement(CreateFileStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getSmallFileMgr().createFile(stmt); }); return null; } @Override public ShowResultSet visitDropFileStatement(DropFileStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getSmallFileMgr().dropFile(stmt); }); return null; } 
@Override public ShowResultSet visitInstallPluginStatement(InstallPluginStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { try { context.getGlobalStateMgr().getPluginMgr().installPlugin(stmt); } catch (IOException e) { throw new RuntimeException(e); } }); return null; } @Override public ShowResultSet visitUninstallPluginStatement(UninstallPluginStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { try { PluginInfo info = context.getGlobalStateMgr().getPluginMgr().uninstallPlugin(stmt.getPluginName()); if (null != info) { GlobalStateMgr.getCurrentState().getEditLog().logUninstallPlugin(info); } LOG.info("uninstall plugin = " + stmt.getPluginName()); } catch (IOException e) { throw new RuntimeException(e); } }); return null; } @Override public ShowResultSet visitAdminCheckTabletsStatement(AdminCheckTabletsStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getLocalMetastore().checkTablets(stmt); }); return null; } @Override public ShowResultSet visitAdminSetPartitionVersionStmt(AdminSetPartitionVersionStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> context.getGlobalStateMgr().getLocalMetastore().setPartitionVersion(stmt)); return null; } @Override public ShowResultSet visitAdminSetReplicaStatusStatement(AdminSetReplicaStatusStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getLocalMetastore().setReplicaStatus(stmt); }); return null; } @Override public ShowResultSet visitCreateResourceStatement(CreateResourceStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getResourceMgr().createResource(stmt); }); return null; } @Override public ShowResultSet visitDropResourceStatement(DropResourceStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getResourceMgr().dropResource(stmt); }); return null; } @Override public ShowResultSet visitAlterResourceStatement(AlterResourceStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getResourceMgr().alterResource(stmt); }); return null; } @Override public ShowResultSet visitCancelExportStatement(CancelExportStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getExportMgr().cancelExportJob(stmt); }); return null; } @Override public ShowResultSet visitCreateAnalyzeJobStatement(CreateAnalyzeJobStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { AnalyzeJob analyzeJob; if (stmt.isNative()) { analyzeJob = new NativeAnalyzeJob(stmt.getDbId(), stmt.getTableId(), stmt.getColumnNames(), stmt.getColumnTypes(), stmt.isSample() ? StatsConstants.AnalyzeType.SAMPLE : StatsConstants.AnalyzeType.FULL, StatsConstants.ScheduleType.SCHEDULE, stmt.getProperties(), StatsConstants.ScheduleStatus.PENDING, LocalDateTime.MIN); } else { analyzeJob = new ExternalAnalyzeJob(stmt.getTableName().getCatalog(), stmt.getTableName().getDb(), stmt.getTableName().getTbl(), stmt.getColumnNames(), stmt.getColumnTypes(), stmt.isSample() ? 
StatsConstants.AnalyzeType.SAMPLE : StatsConstants.AnalyzeType.FULL, StatsConstants.ScheduleType.SCHEDULE, stmt.getProperties(), StatsConstants.ScheduleStatus.PENDING, LocalDateTime.MIN); } context.getGlobalStateMgr().getAnalyzeMgr().addAnalyzeJob(analyzeJob); ConnectContext statsConnectCtx = StatisticUtils.buildConnectContext(); statsConnectCtx.getSessionVariable().setStatisticCollectParallelism( context.getSessionVariable().getStatisticCollectParallelism()); Thread thread = new Thread(() -> { statsConnectCtx.setThreadLocalInfo(); StatisticExecutor statisticExecutor = new StatisticExecutor(); analyzeJob.run(statsConnectCtx, statisticExecutor); }); thread.start(); }); return null; } @Override public ShowResultSet visitDropAnalyzeStatement(DropAnalyzeJobStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException( () -> context.getGlobalStateMgr().getAnalyzeMgr().removeAnalyzeJob(stmt.getId())); return null; } @Override public ShowResultSet visitRefreshTableStatement(RefreshTableStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().refreshExternalTable(stmt); }); return null; } @Override public ShowResultSet visitCreateResourceGroupStatement(CreateResourceGroupStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getResourceGroupMgr().createResourceGroup(stmt); }); return null; } @Override public ShowResultSet visitDropResourceGroupStatement(DropResourceGroupStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getResourceGroupMgr().dropResourceGroup(stmt); }); return null; } @Override public ShowResultSet visitAlterResourceGroupStatement(AlterResourceGroupStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getResourceGroupMgr().alterResourceGroup(stmt); }); return null; } @Override public ShowResultSet visitCreateCatalogStatement(CreateCatalogStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { String catalogName = stmt.getCatalogName(); if (context.getGlobalStateMgr().getCatalogMgr().catalogExists(catalogName)) { if (stmt.isIfNotExists()) { LOG.info("create catalog[{}] which already exists", catalogName); return; } else { ErrorReport.reportDdlException(ErrorCode.ERR_CATALOG_EXISTED_ERROR, catalogName); } } context.getGlobalStateMgr().getCatalogMgr().createCatalog(stmt); }); return null; } @Override @Override public ShowResultSet visitAlterCatalogStatement(AlterCatalogStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getCatalogMgr().alterCatalog(stmt); }); return null; } @Override public ShowResultSet visitSubmitTaskStatement(SubmitTaskStmt stmt, ConnectContext context) { try { return context.getGlobalStateMgr().getTaskManager().handleSubmitTaskStmt(stmt); } catch (UserException e) { throw new RuntimeException(e); } } @Override public ShowResultSet visitDropTaskStmt(DropTaskStmt dropTaskStmt, ConnectContext context) { TaskManager taskManager = context.getGlobalStateMgr().getTaskManager(); String taskName = dropTaskStmt.getTaskName().getName(); if (!taskManager.containTask(taskName)) { throw new SemanticException("Task " + taskName + " is not exist"); } Task task = taskManager.getTask(taskName); if (task.getSource() == Constants.TaskSource.MV && !dropTaskStmt.isForce()) { throw new RuntimeException("Can not drop task generated by materialized view. 
You can use " + "DROP MATERIALIZED VIEW to drop task, when the materialized view is deleted, " + "the related task will be deleted automatically."); } taskManager.dropTasks(Collections.singletonList(task.getId()), false); return null; } @Override public ShowResultSet visitCreateStorageVolumeStatement(CreateStorageVolumeStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { try { context.getGlobalStateMgr().getStorageVolumeMgr().createStorageVolume(stmt); } catch (AlreadyExistsException e) { if (stmt.isSetIfNotExists()) { LOG.info("create storage volume[{}] which already exists", stmt.getName()); } else { throw e; } } }); return null; } @Override public ShowResultSet visitAlterStorageVolumeStatement(AlterStorageVolumeStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> context.getGlobalStateMgr().getStorageVolumeMgr().updateStorageVolume(stmt) ); return null; } @Override public ShowResultSet visitDropStorageVolumeStatement(DropStorageVolumeStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { try { context.getGlobalStateMgr().getStorageVolumeMgr().removeStorageVolume(stmt); } catch (MetaNotFoundException e) { if (stmt.isSetIfExists()) { LOG.info("drop storage volume[{}] which does not exist", stmt.getName()); } else { throw e; } } }); return null; } @Override public ShowResultSet visitSetDefaultStorageVolumeStatement(SetDefaultStorageVolumeStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> context.getGlobalStateMgr().getStorageVolumeMgr().setDefaultStorageVolume(stmt) ); return null; } @Override public ShowResultSet visitCreatePipeStatement(CreatePipeStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> context.getGlobalStateMgr().getPipeManager().createPipe(stmt) ); return null; } @Override public ShowResultSet visitDropPipeStatement(DropPipeStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> context.getGlobalStateMgr().getPipeManager().dropPipe(stmt) ); return null; } @Override public ShowResultSet visitAlterPipeStatement(AlterPipeStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> context.getGlobalStateMgr().getPipeManager().alterPipe(stmt) ); return null; } @Override public ShowResultSet visitCreateDataCacheRuleStatement(CreateDataCacheRuleStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { DataCacheMgr.getInstance().createCacheRule(stmt.getTarget(), stmt.getPredicates(), stmt.getPriority(), stmt.getProperties()); }); return null; } @Override public ShowResultSet visitDropDataCacheRuleStatement(DropDataCacheRuleStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { DataCacheMgr.getInstance().dropCacheRule(stmt.getCacheRuleId()); }); return null; } @Override public ShowResultSet visitClearDataCacheRulesStatement(ClearDataCacheRulesStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { DataCacheMgr.getInstance().clearRules(); }); return null; } @Override public ShowResultSet visitDataCacheSelectStatement(DataCacheSelectStatement statement, ConnectContext context) { DataCacheSelectMetrics metrics = null; try { metrics = DataCacheSelectExecutor.cacheSelect(statement, context); } catch (Exception e) { LOG.warn(e); throw new RuntimeException(e.getMessage()); } return metrics.getShowResultSet(statement.isVerbose()); } @Override public ShowResultSet visitCreateDictionaryStatement(CreateDictionaryStmt stmt, ConnectContext 
context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getDictionaryMgr().createDictionary(stmt, context.getDatabase()); }); return null; } @Override public ShowResultSet visitDropDictionaryStatement(DropDictionaryStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getDictionaryMgr().dropDictionary(stmt.getDictionaryName(), stmt.isCacheOnly(), false); }); return null; } @Override public ShowResultSet visitRefreshDictionaryStatement(RefreshDictionaryStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getDictionaryMgr().refreshDictionary(stmt.getDictionaryName()); }); return null; } @Override public ShowResultSet visitCancelRefreshDictionaryStatement(CancelRefreshDictionaryStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getDictionaryMgr().cancelRefreshDictionary(stmt.getDictionaryName()); }); return null; } }
class StmtExecutorVisitor implements AstVisitor<ShowResultSet, ConnectContext> { private static final Logger LOG = LogManager.getLogger(StmtExecutorVisitor.class); private static final StmtExecutorVisitor INSTANCE = new StmtExecutorVisitor(); public static StmtExecutorVisitor getInstance() { return INSTANCE; } protected StmtExecutorVisitor() { } @Override public ShowResultSet visitNode(ParseNode node, ConnectContext context) { throw new RuntimeException(new DdlException("unsupported statement: " + node.toSql())); } @Override public ShowResultSet visitCreateDbStatement(CreateDbStmt stmt, ConnectContext context) { String fullDbName = stmt.getFullDbName(); String catalogName = stmt.getCatalogName(); Map<String, String> properties = stmt.getProperties(); boolean isSetIfNotExists = stmt.isSetIfNotExists(); ErrorReport.wrapWithRuntimeException(() -> { try { context.getGlobalStateMgr().getMetadataMgr().createDb(catalogName, fullDbName, properties); } catch (AlreadyExistsException e) { if (isSetIfNotExists) { LOG.info("create database[{}] which already exists", fullDbName); } else { ErrorReport.reportDdlException(ErrorCode.ERR_DB_CREATE_EXISTS, fullDbName); } } }); return null; } @Override public ShowResultSet visitDropDbStatement(DropDbStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { String catalogName = stmt.getCatalogName(); String dbName = stmt.getDbName(); boolean isForceDrop = stmt.isForceDrop(); try { context.getGlobalStateMgr().getMetadataMgr().dropDb(catalogName, dbName, isForceDrop); } catch (MetaNotFoundException e) { if (stmt.isSetIfExists()) { LOG.info("drop database[{}] which does not exist", dbName); } else { ErrorReport.reportDdlException(ErrorCode.ERR_DB_DROP_EXISTS, dbName); } } }); return null; } @Override public ShowResultSet visitCreateFunctionStatement(CreateFunctionStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { FunctionName name = stmt.getFunctionName(); if (name.isGlobalFunction()) { context.getGlobalStateMgr() .getGlobalFunctionMgr() .userAddFunction(stmt.getFunction(), stmt.shouldReplaceIfExists()); } else { Database db = context.getGlobalStateMgr().getDb(name.getDb()); if (db == null) { ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, name.getDb()); } db.addFunction(stmt.getFunction(), stmt.shouldReplaceIfExists()); } }); return null; } @Override public ShowResultSet visitDropFunctionStatement(DropFunctionStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { FunctionName name = stmt.getFunctionName(); if (name.isGlobalFunction()) { context.getGlobalStateMgr().getGlobalFunctionMgr().userDropFunction(stmt.getFunctionSearchDesc()); } else { Database db = context.getGlobalStateMgr().getDb(name.getDb()); if (db == null) { ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, name.getDb()); } db.dropFunction(stmt.getFunctionSearchDesc()); } }); return null; } @Override public ShowResultSet visitCreateTableStatement(CreateTableStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getMetadataMgr().createTable(stmt); }); return null; } @Override public ShowResultSet visitCreateTemporaryTableStatement(CreateTemporaryTableStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getMetadataMgr().createTemporaryTable(stmt); }); return null; } @Override public ShowResultSet visitCreateTableLikeStatement(CreateTableLikeStmt stmt, ConnectContext context) { 
ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getMetadataMgr().createTableLike(stmt); }); return null; } @Override public ShowResultSet visitCreateTemporaryTableLikeStatement( CreateTemporaryTableLikeStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getMetadataMgr() .createTemporaryTable((CreateTemporaryTableStmt) stmt.getCreateTableStmt()); }); return null; } @Override public ShowResultSet visitDropTableStatement(DropTableStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { if (stmt.getTemporaryTableMark()) { DropTemporaryTableStmt dropTemporaryTableStmt = new DropTemporaryTableStmt( stmt.isSetIfExists(), stmt.getTbl(), stmt.isForceDrop()); dropTemporaryTableStmt.setSessionId(context.getSessionId()); context.getGlobalStateMgr().getMetadataMgr().dropTemporaryTable(dropTemporaryTableStmt); } else { context.getGlobalStateMgr().getMetadataMgr().dropTable(stmt); } }); return null; } @Override public ShowResultSet visitDropTemporaryTableStatement(DropTemporaryTableStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getMetadataMgr().dropTemporaryTable(stmt); }); return null; } @Override public ShowResultSet visitCleanTemporaryTableStatement(CleanTemporaryTableStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getMetadataMgr().cleanTemporaryTables(stmt); }); return null; } @Override public ShowResultSet visitCreateMaterializedViewStmt(CreateMaterializedViewStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getLocalMetastore().createMaterializedView(stmt); }); return null; } @Override public ShowResultSet visitCreateMaterializedViewStatement(CreateMaterializedViewStatement stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getLocalMetastore().createMaterializedView(stmt); }); return null; } @Override public ShowResultSet visitDropMaterializedViewStatement(DropMaterializedViewStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getLocalMetastore().dropMaterializedView(stmt); }); return null; } @Override public ShowResultSet visitAlterMaterializedViewStatement(AlterMaterializedViewStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getLocalMetastore().alterMaterializedView(stmt); }); return null; } @Override public ShowResultSet visitRefreshMaterializedViewStatement(RefreshMaterializedViewStatement stmt, ConnectContext context) { List<String> info = Lists.newArrayList(); ErrorReport.wrapWithRuntimeException(() -> { String taskRunId = context.getGlobalStateMgr().getLocalMetastore().refreshMaterializedView(stmt); info.add(taskRunId); }); return new ShowResultSet(RefreshMaterializedViewStatement.META_DATA, Arrays.asList(info)); } @Override public ShowResultSet visitCancelRefreshMaterializedViewStatement(CancelRefreshMaterializedViewStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getLocalMetastore() .cancelRefreshMaterializedView( stmt.getMvName().getDb(), stmt.getMvName().getTbl()); }); return null; } @Override public ShowResultSet visitAlterTableStatement(AlterTableStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { 
context.getGlobalStateMgr().getMetadataMgr().alterTable(stmt); }); return null; } @Override public ShowResultSet visitAlterViewStatement(AlterViewStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getLocalMetastore().alterView(stmt); }); return null; } @Override public ShowResultSet visitCancelAlterTableStatement(CancelAlterTableStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getLocalMetastore().cancelAlter(stmt); }); return null; } @Override public ShowResultSet visitLoadStatement(LoadStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { EtlJobType jobType = stmt.getEtlJobType(); if (jobType == EtlJobType.UNKNOWN) { throw new DdlException("Unknown load job type"); } if (jobType == EtlJobType.HADOOP && Config.disable_hadoop_load) { throw new DdlException("Load job by hadoop cluster is disabled." + " Try using broker load. See 'help broker load;'"); } context.getGlobalStateMgr().getLoadMgr().createLoadJobFromStmt(stmt, context); }); return null; } @Override public ShowResultSet visitCancelLoadStatement(CancelLoadStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getLoadMgr().cancelLoadJob(stmt); }); return null; } @Override public ShowResultSet visitCancelCompactionStatement(CancelCompactionStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getCompactionMgr().cancelCompaction(stmt.getTxnId()); }); return null; } @Override public ShowResultSet visitCreateRoutineLoadStatement(CreateRoutineLoadStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getRoutineLoadMgr().createRoutineLoadJob(stmt); }); return null; } @Override public ShowResultSet visitPauseRoutineLoadStatement(PauseRoutineLoadStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getRoutineLoadMgr().pauseRoutineLoadJob(stmt); }); return null; } @Override public ShowResultSet visitResumeRoutineLoadStatement(ResumeRoutineLoadStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getRoutineLoadMgr().resumeRoutineLoadJob(stmt); }); return null; } @Override public ShowResultSet visitStopRoutineLoadStatement(StopRoutineLoadStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getRoutineLoadMgr().stopRoutineLoadJob(stmt); }); return null; } @Override public ShowResultSet visitAlterRoutineLoadStatement(AlterRoutineLoadStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getRoutineLoadMgr().alterRoutineLoadJob(stmt); }); return null; } @Override public ShowResultSet visitAlterLoadStatement(AlterLoadStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getLoadMgr().alterLoadJob(stmt); }); return null; } @Override public ShowResultSet visitCreateUserStatement(CreateUserStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getAuthenticationMgr().createUser(stmt); }); return null; } @Override public ShowResultSet visitAlterUserStatement(AlterUserStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getAuthenticationMgr() 
.alterUser(stmt.getUserIdentity(), stmt.getAuthenticationInfo()); }); return null; } @Override public ShowResultSet visitDropUserStatement(DropUserStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getAuthenticationMgr().dropUser(stmt); }); return null; } @Override public ShowResultSet visitGrantRevokeRoleStatement(BaseGrantRevokeRoleStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { if (stmt instanceof GrantRoleStmt) { context.getGlobalStateMgr().getAuthorizationMgr().grantRole((GrantRoleStmt) stmt); } else { context.getGlobalStateMgr().getAuthorizationMgr().revokeRole((RevokeRoleStmt) stmt); } }); return null; } @Override public ShowResultSet visitGrantRevokePrivilegeStatement(BaseGrantRevokePrivilegeStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { if (stmt instanceof GrantPrivilegeStmt) { context.getGlobalStateMgr().getAuthorizationMgr().grant((GrantPrivilegeStmt) stmt); } else { context.getGlobalStateMgr().getAuthorizationMgr().revoke((RevokePrivilegeStmt) stmt); } }); return null; } @Override public ShowResultSet visitCreateRoleStatement(CreateRoleStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getAuthorizationMgr().createRole(stmt); }); return null; } @Override public ShowResultSet visitAlterRoleStatement(AlterRoleStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getAuthorizationMgr().alterRole(stmt); }); return null; } @Override public ShowResultSet visitDropRoleStatement(DropRoleStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getAuthorizationMgr().dropRole(stmt); }); return null; } @Override public ShowResultSet visitSetUserPropertyStatement(SetUserPropertyStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getAuthenticationMgr().updateUserProperty(stmt.getUser(), stmt.getPropertyPairList()); }); return null; } @Override public ShowResultSet visitAlterSystemStatement(AlterSystemStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getAlterJobMgr().processAlterCluster(stmt); }); return null; } @Override public ShowResultSet visitCancelAlterSystemStatement(CancelAlterSystemStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getAlterJobMgr().getClusterHandler().cancel(stmt); }); return null; } @Override public ShowResultSet visitAlterDatabaseQuotaStatement(AlterDatabaseQuotaStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getLocalMetastore().alterDatabaseQuota(stmt); }); return null; } @Override public ShowResultSet visitAlterDatabaseRenameStatement(AlterDatabaseRenameStatement stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getLocalMetastore().renameDatabase(stmt); }); return null; } @Override public ShowResultSet visitRecoverDbStatement(RecoverDbStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getLocalMetastore().recoverDatabase(stmt); }); return null; } @Override public ShowResultSet visitRecoverTableStatement(RecoverTableStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { 
context.getGlobalStateMgr().getLocalMetastore().recoverTable(stmt); }); return null; } @Override public ShowResultSet visitRecoverPartitionStatement(RecoverPartitionStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getLocalMetastore().recoverPartition(stmt); }); return null; } @Override public ShowResultSet visitCreateViewStatement(CreateViewStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { GlobalStateMgr.getCurrentState().getMetadataMgr().createView(stmt); }); return null; } @Override public ShowResultSet visitBackupStatement(BackupStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getBackupHandler().process(stmt); }); return null; } @Override public ShowResultSet visitRestoreStatement(RestoreStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getBackupHandler().process(stmt); }); return null; } @Override public ShowResultSet visitCancelBackupStatement(CancelBackupStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getBackupHandler().cancel(stmt); }); return null; } @Override public ShowResultSet visitCreateRepositoryStatement(CreateRepositoryStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getBackupHandler().createRepository(stmt); }); return null; } @Override public ShowResultSet visitDropRepositoryStatement(DropRepositoryStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getBackupHandler().dropRepository(stmt); }); return null; } @Override public ShowResultSet visitSyncStatement(SyncStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { }); return null; } @Override public ShowResultSet visitTruncateTableStatement(TruncateTableStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getLocalMetastore().truncateTable(stmt, context); }); return null; } @Override public ShowResultSet visitAdminRepairTableStatement(AdminRepairTableStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getTabletChecker().repairTable(stmt); }); return null; } @Override public ShowResultSet visitAdminCancelRepairTableStatement(AdminCancelRepairTableStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getTabletChecker().cancelRepairTable(stmt); }); return null; } @Override public ShowResultSet visitAdminSetConfigStatement(AdminSetConfigStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getNodeMgr().setConfig(stmt); if (stmt.getConfig().containsKey("mysql_server_version")) { String version = stmt.getConfig().getMap().get("mysql_server_version"); if (!Strings.isNullOrEmpty(version)) { GlobalVariable.version = version; } } }); return null; } @Override public ShowResultSet visitCreateFileStatement(CreateFileStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getSmallFileMgr().createFile(stmt); }); return null; } @Override public ShowResultSet visitDropFileStatement(DropFileStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getSmallFileMgr().dropFile(stmt); }); return null; } 
@Override public ShowResultSet visitInstallPluginStatement(InstallPluginStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { try { context.getGlobalStateMgr().getPluginMgr().installPlugin(stmt); } catch (IOException e) { throw new RuntimeException(e); } }); return null; } @Override public ShowResultSet visitUninstallPluginStatement(UninstallPluginStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { try { PluginInfo info = context.getGlobalStateMgr().getPluginMgr().uninstallPlugin(stmt.getPluginName()); if (null != info) { GlobalStateMgr.getCurrentState().getEditLog().logUninstallPlugin(info); } LOG.info("uninstall plugin = " + stmt.getPluginName()); } catch (IOException e) { throw new RuntimeException(e); } }); return null; } @Override public ShowResultSet visitAdminCheckTabletsStatement(AdminCheckTabletsStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getLocalMetastore().checkTablets(stmt); }); return null; } @Override public ShowResultSet visitAdminSetPartitionVersionStmt(AdminSetPartitionVersionStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> context.getGlobalStateMgr().getLocalMetastore().setPartitionVersion(stmt)); return null; } @Override public ShowResultSet visitAdminSetReplicaStatusStatement(AdminSetReplicaStatusStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getLocalMetastore().setReplicaStatus(stmt); }); return null; } @Override public ShowResultSet visitCreateResourceStatement(CreateResourceStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getResourceMgr().createResource(stmt); }); return null; } @Override public ShowResultSet visitDropResourceStatement(DropResourceStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getResourceMgr().dropResource(stmt); }); return null; } @Override public ShowResultSet visitAlterResourceStatement(AlterResourceStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getResourceMgr().alterResource(stmt); }); return null; } @Override public ShowResultSet visitCancelExportStatement(CancelExportStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getExportMgr().cancelExportJob(stmt); }); return null; } @Override public ShowResultSet visitCreateAnalyzeJobStatement(CreateAnalyzeJobStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { AnalyzeJob analyzeJob; if (stmt.isNative()) { analyzeJob = new NativeAnalyzeJob(stmt.getDbId(), stmt.getTableId(), stmt.getColumnNames(), stmt.getColumnTypes(), stmt.isSample() ? StatsConstants.AnalyzeType.SAMPLE : StatsConstants.AnalyzeType.FULL, StatsConstants.ScheduleType.SCHEDULE, stmt.getProperties(), StatsConstants.ScheduleStatus.PENDING, LocalDateTime.MIN); } else { analyzeJob = new ExternalAnalyzeJob(stmt.getTableName().getCatalog(), stmt.getTableName().getDb(), stmt.getTableName().getTbl(), stmt.getColumnNames(), stmt.getColumnTypes(), stmt.isSample() ? 
StatsConstants.AnalyzeType.SAMPLE : StatsConstants.AnalyzeType.FULL, StatsConstants.ScheduleType.SCHEDULE, stmt.getProperties(), StatsConstants.ScheduleStatus.PENDING, LocalDateTime.MIN); } context.getGlobalStateMgr().getAnalyzeMgr().addAnalyzeJob(analyzeJob); ConnectContext statsConnectCtx = StatisticUtils.buildConnectContext(); statsConnectCtx.getSessionVariable().setStatisticCollectParallelism( context.getSessionVariable().getStatisticCollectParallelism()); Thread thread = new Thread(() -> { statsConnectCtx.setThreadLocalInfo(); StatisticExecutor statisticExecutor = new StatisticExecutor(); analyzeJob.run(statsConnectCtx, statisticExecutor); }); thread.start(); }); return null; } @Override public ShowResultSet visitDropAnalyzeStatement(DropAnalyzeJobStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException( () -> context.getGlobalStateMgr().getAnalyzeMgr().removeAnalyzeJob(stmt.getId())); return null; } @Override public ShowResultSet visitRefreshTableStatement(RefreshTableStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().refreshExternalTable(stmt); }); return null; } @Override public ShowResultSet visitCreateResourceGroupStatement(CreateResourceGroupStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getResourceGroupMgr().createResourceGroup(stmt); }); return null; } @Override public ShowResultSet visitDropResourceGroupStatement(DropResourceGroupStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getResourceGroupMgr().dropResourceGroup(stmt); }); return null; } @Override public ShowResultSet visitAlterResourceGroupStatement(AlterResourceGroupStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getResourceGroupMgr().alterResourceGroup(stmt); }); return null; } @Override public ShowResultSet visitCreateCatalogStatement(CreateCatalogStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { String catalogName = stmt.getCatalogName(); if (context.getGlobalStateMgr().getCatalogMgr().catalogExists(catalogName)) { if (stmt.isIfNotExists()) { LOG.info("create catalog[{}] which already exists", catalogName); return; } else { ErrorReport.reportDdlException(ErrorCode.ERR_CATALOG_EXISTED_ERROR, catalogName); } } context.getGlobalStateMgr().getCatalogMgr().createCatalog(stmt); }); return null; } @Override @Override public ShowResultSet visitAlterCatalogStatement(AlterCatalogStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getCatalogMgr().alterCatalog(stmt); }); return null; } @Override public ShowResultSet visitSubmitTaskStatement(SubmitTaskStmt stmt, ConnectContext context) { try { return context.getGlobalStateMgr().getTaskManager().handleSubmitTaskStmt(stmt); } catch (UserException e) { throw new RuntimeException(e); } } @Override public ShowResultSet visitDropTaskStmt(DropTaskStmt dropTaskStmt, ConnectContext context) { TaskManager taskManager = context.getGlobalStateMgr().getTaskManager(); String taskName = dropTaskStmt.getTaskName().getName(); if (!taskManager.containTask(taskName)) { throw new SemanticException("Task " + taskName + " is not exist"); } Task task = taskManager.getTask(taskName); if (task.getSource() == Constants.TaskSource.MV && !dropTaskStmt.isForce()) { throw new RuntimeException("Can not drop task generated by materialized view. 
You can use " + "DROP MATERIALIZED VIEW to drop task, when the materialized view is deleted, " + "the related task will be deleted automatically."); } taskManager.dropTasks(Collections.singletonList(task.getId()), false); return null; } @Override public ShowResultSet visitCreateStorageVolumeStatement(CreateStorageVolumeStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { try { context.getGlobalStateMgr().getStorageVolumeMgr().createStorageVolume(stmt); } catch (AlreadyExistsException e) { if (stmt.isSetIfNotExists()) { LOG.info("create storage volume[{}] which already exists", stmt.getName()); } else { throw e; } } }); return null; } @Override public ShowResultSet visitAlterStorageVolumeStatement(AlterStorageVolumeStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> context.getGlobalStateMgr().getStorageVolumeMgr().updateStorageVolume(stmt) ); return null; } @Override public ShowResultSet visitDropStorageVolumeStatement(DropStorageVolumeStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { try { context.getGlobalStateMgr().getStorageVolumeMgr().removeStorageVolume(stmt); } catch (MetaNotFoundException e) { if (stmt.isSetIfExists()) { LOG.info("drop storage volume[{}] which does not exist", stmt.getName()); } else { throw e; } } }); return null; } @Override public ShowResultSet visitSetDefaultStorageVolumeStatement(SetDefaultStorageVolumeStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> context.getGlobalStateMgr().getStorageVolumeMgr().setDefaultStorageVolume(stmt) ); return null; } @Override public ShowResultSet visitCreatePipeStatement(CreatePipeStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> context.getGlobalStateMgr().getPipeManager().createPipe(stmt) ); return null; } @Override public ShowResultSet visitDropPipeStatement(DropPipeStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> context.getGlobalStateMgr().getPipeManager().dropPipe(stmt) ); return null; } @Override public ShowResultSet visitAlterPipeStatement(AlterPipeStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> context.getGlobalStateMgr().getPipeManager().alterPipe(stmt) ); return null; } @Override public ShowResultSet visitCreateDataCacheRuleStatement(CreateDataCacheRuleStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { DataCacheMgr.getInstance().createCacheRule(stmt.getTarget(), stmt.getPredicates(), stmt.getPriority(), stmt.getProperties()); }); return null; } @Override public ShowResultSet visitDropDataCacheRuleStatement(DropDataCacheRuleStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { DataCacheMgr.getInstance().dropCacheRule(stmt.getCacheRuleId()); }); return null; } @Override public ShowResultSet visitClearDataCacheRulesStatement(ClearDataCacheRulesStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { DataCacheMgr.getInstance().clearRules(); }); return null; } @Override public ShowResultSet visitDataCacheSelectStatement(DataCacheSelectStatement statement, ConnectContext context) { DataCacheSelectMetrics metrics = null; try { metrics = DataCacheSelectExecutor.cacheSelect(statement, context); } catch (Exception e) { LOG.warn(e); throw new RuntimeException(e.getMessage()); } return metrics.getShowResultSet(statement.isVerbose()); } @Override public ShowResultSet visitCreateDictionaryStatement(CreateDictionaryStmt stmt, ConnectContext 
context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getDictionaryMgr().createDictionary(stmt, context.getDatabase()); }); return null; } @Override public ShowResultSet visitDropDictionaryStatement(DropDictionaryStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getDictionaryMgr().dropDictionary(stmt.getDictionaryName(), stmt.isCacheOnly(), false); }); return null; } @Override public ShowResultSet visitRefreshDictionaryStatement(RefreshDictionaryStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getDictionaryMgr().refreshDictionary(stmt.getDictionaryName()); }); return null; } @Override public ShowResultSet visitCancelRefreshDictionaryStatement(CancelRefreshDictionaryStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { context.getGlobalStateMgr().getDictionaryMgr().cancelRefreshDictionary(stmt.getDictionaryName()); }); return null; } }
Not sure what you mean; the claim that `the union of targetColumns and all auto-increment key columns are all the table columns` is not true.
public static void analyze(InsertStmt insertStmt, ConnectContext session) { QueryRelation query = insertStmt.getQueryStatement().getQueryRelation(); new QueryAnalyzer(session).analyze(insertStmt.getQueryStatement()); List<Table> tables = new ArrayList<>(); AnalyzerUtils.collectSpecifyExternalTables(insertStmt.getQueryStatement(), tables, Table::isHiveTable); tables.stream().map(table -> (HiveTable) table) .forEach(table -> table.useMetadataCache(false)); /* * Target table */ Table table; if (insertStmt.getTargetTable() != null) { table = insertStmt.getTargetTable(); } else { table = getTargetTable(insertStmt, session); } if (table instanceof OlapTable) { OlapTable olapTable = (OlapTable) table; List<Long> targetPartitionIds = Lists.newArrayList(); PartitionNames targetPartitionNames = insertStmt.getTargetPartitionNames(); if (insertStmt.isSpecifyPartitionNames()) { if (targetPartitionNames.getPartitionNames().isEmpty()) { throw new SemanticException("No partition specified in partition lists", targetPartitionNames.getPos()); } List<String> deduplicatePartitionNames = targetPartitionNames.getPartitionNames().stream().distinct().collect(Collectors.toList()); if (deduplicatePartitionNames.size() != targetPartitionNames.getPartitionNames().size()) { insertStmt.setTargetPartitionNames(new PartitionNames(targetPartitionNames.isTemp(), deduplicatePartitionNames, targetPartitionNames.getPartitionColNames(), targetPartitionNames.getPartitionColValues(), targetPartitionNames.getPos())); } for (String partitionName : deduplicatePartitionNames) { if (Strings.isNullOrEmpty(partitionName)) { throw new SemanticException("there are empty partition name", targetPartitionNames.getPos()); } Partition partition = olapTable.getPartition(partitionName, targetPartitionNames.isTemp()); if (partition == null) { throw new SemanticException("Unknown partition '%s' in table '%s'", partitionName, olapTable.getName(), targetPartitionNames.getPos()); } targetPartitionIds.add(partition.getId()); } } else if (insertStmt.isStaticKeyPartitionInsert()) { checkStaticKeyPartitionInsert(insertStmt, table, targetPartitionNames); } else { for (Partition partition : olapTable.getPartitions()) { targetPartitionIds.add(partition.getId()); } if (targetPartitionIds.isEmpty()) { throw new SemanticException("data cannot be inserted into table with empty partition." + "Use `SHOW PARTITIONS FROM %s` to see the currently partitions of this table. 
", olapTable.getName()); } } insertStmt.setTargetPartitionIds(targetPartitionIds); } if (table.isIcebergTable() || table.isHiveTable()) { if (table.isHiveTable() && table.isUnPartitioned() && HiveWriteUtils.isS3Url(table.getTableLocation()) && insertStmt.isOverwrite()) { throw new SemanticException("Unsupported insert overwrite hive unpartitioned table with s3 location"); } if (table.isHiveTable() && ((HiveTable) table).getHiveTableType() != HiveTable.HiveTableType.MANAGED_TABLE && !session.getSessionVariable().enableWriteHiveExternalTable()) { throw new SemanticException("Only support to write hive managed table, tableType: " + ((HiveTable) table).getHiveTableType()); } PartitionNames targetPartitionNames = insertStmt.getTargetPartitionNames(); List<String> tablePartitionColumnNames = table.getPartitionColumnNames(); if (insertStmt.getTargetColumnNames() != null) { for (String partitionColName : tablePartitionColumnNames) { if (!insertStmt.getTargetColumnNames().contains(partitionColName)) { throw new SemanticException("Must include partition column %s", partitionColName); } } } else if (insertStmt.isStaticKeyPartitionInsert()) { checkStaticKeyPartitionInsert(insertStmt, table, targetPartitionNames); } List<Column> partitionColumns = tablePartitionColumnNames.stream() .map(table::getColumn) .collect(Collectors.toList()); for (Column column : partitionColumns) { if (isUnSupportedPartitionColumnType(column.getType())) { throw new SemanticException("Unsupported partition column type [%s] for %s table sink", column.getType().canonicalName(), table.getType()); } } } List<Column> targetColumns; Set<String> mentionedColumns = Sets.newTreeSet(String.CASE_INSENSITIVE_ORDER); if (insertStmt.getTargetColumnNames() == null) { if (table instanceof OlapTable) { targetColumns = new ArrayList<>(((OlapTable) table).getBaseSchemaWithoutGeneratedColumn()); mentionedColumns = ((OlapTable) table).getBaseSchemaWithoutGeneratedColumn().stream() .map(Column::getName).collect(Collectors.toSet()); } else { targetColumns = new ArrayList<>(table.getBaseSchema()); mentionedColumns = table.getBaseSchema().stream().map(Column::getName).collect(Collectors.toSet()); } } else { targetColumns = new ArrayList<>(); List<Column> autoIncrementKeyColumns = table.getBaseSchema().stream().filter(Column::isKey) .filter(Column::isAutoIncrement).collect(Collectors.toList()); int numSpecifiedKeyColumns = autoIncrementKeyColumns.size(); for (String colName : insertStmt.getTargetColumnNames()) { Column column = table.getColumn(colName); if (column == null) { throw new SemanticException("Unknown column '%s' in '%s'", colName, table.getName()); } if (column.isGeneratedColumn()) { throw new SemanticException("generated column '%s' can not be specified", colName); } if (!mentionedColumns.add(colName)) { throw new SemanticException("Column '%s' specified twice", colName); } if (column.isKey() && !column.isAutoIncrement()) { numSpecifiedKeyColumns++; } targetColumns.add(column); } if (table.isOlapTable()) { OlapTable olapTable = (OlapTable) table; if (olapTable.getKeysType().equals(KeysType.PRIMARY_KEYS)) { if (numSpecifiedKeyColumns != olapTable.getKeysNum()) { throw new SemanticException("should specify all key columns when insert to primary key table"); } if (targetColumns.size() < olapTable.getBaseSchemaWithoutGeneratedColumn().size()) { insertStmt.setUsePartialUpdate(); } } } } if (!insertStmt.usePartialUpdate()) { for (Column column : table.getBaseSchema()) { Column.DefaultValueType defaultValueType = column.getDefaultValueType(); if 
(defaultValueType == Column.DefaultValueType.NULL && !column.isAllowNull() && !column.isAutoIncrement() && !column.isGeneratedColumn() && !mentionedColumns.contains(column.getName())) { StringBuilder msg = new StringBuilder(); for (String s : mentionedColumns) { msg.append(" ").append(s).append(" "); } throw new SemanticException("'%s' must be explicitly mentioned in column permutation: %s", column.getName(), msg.toString()); } } } int mentionedColumnSize = mentionedColumns.size(); if ((table.isIcebergTable() || table.isHiveTable()) && insertStmt.isStaticKeyPartitionInsert()) { mentionedColumnSize -= table.getPartitionColumnNames().size(); } if (query.getRelationFields().size() != mentionedColumnSize) { throw new SemanticException("Column count doesn't match value count"); } if (query instanceof ValuesRelation) { ValuesRelation valuesRelation = (ValuesRelation) query; for (List<Expr> row : valuesRelation.getRows()) { for (int columnIdx = 0; columnIdx < row.size(); ++columnIdx) { Column column = targetColumns.get(columnIdx); Column.DefaultValueType defaultValueType = column.getDefaultValueType(); if (row.get(columnIdx) instanceof DefaultValueExpr && defaultValueType == Column.DefaultValueType.NULL && !column.isAutoIncrement()) { throw new SemanticException("Column has no default value, column=%s", column.getName()); } AnalyzerUtils.verifyNoAggregateFunctions(row.get(columnIdx), "Values"); AnalyzerUtils.verifyNoWindowFunctions(row.get(columnIdx), "Values"); } } } insertStmt.setTargetTable(table); if (session.getDumpInfo() != null) { session.getDumpInfo().addTable(insertStmt.getTableName().getDb(), table); } }
if (numSpecifiedKeyColumns != olapTable.getKeysNum()) {
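The flagged line above enforces the rule by counting: it seeds numSpecifiedKeyColumns with the auto-increment key columns and then requires the total to equal getKeysNum(). The revised body below switches to a set of still-required key names, so the error can report exactly which key columns were omitted. What follows is a minimal, self-contained sketch of that set-based approach, not the patch itself; the sample schema and column names (k1, k2, v1) are hypothetical.

import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;

// Sketch: track which non-auto-increment key columns remain unmentioned,
// instead of comparing a running count against the table's key count.
public class RequiredKeyColumnCheck {
    public static void main(String[] args) {
        // Hypothetical primary-key schema: k1 and k2 are plain key columns;
        // an auto-increment key would simply be excluded from this set.
        Set<String> requiredKeyColumns = new LinkedHashSet<>(Arrays.asList("k1", "k2"));

        // Columns the INSERT statement actually names (the user forgot k2).
        List<String> targetColumnNames = Arrays.asList("k1", "v1");
        for (String colName : targetColumnNames) {
            requiredKeyColumns.remove(colName.toLowerCase());
        }

        if (!requiredKeyColumns.isEmpty()) {
            // The real code raises ERR_MISSING_KEY_COLUMNS here; printing the
            // joined names suffices for the sketch.
            System.out.println("missing key columns: " + String.join(",", requiredKeyColumns));
        }
    }
}

Compared with the count comparison, the set arguably fails more informatively: the diagnostic names the missing columns rather than asserting an aggregate invariant that the reviewer found hard to justify.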
public static void analyze(InsertStmt insertStmt, ConnectContext session) { QueryRelation query = insertStmt.getQueryStatement().getQueryRelation(); new QueryAnalyzer(session).analyze(insertStmt.getQueryStatement()); List<Table> tables = new ArrayList<>(); AnalyzerUtils.collectSpecifyExternalTables(insertStmt.getQueryStatement(), tables, Table::isHiveTable); tables.stream().map(table -> (HiveTable) table) .forEach(table -> table.useMetadataCache(false)); /* * Target table */ Table table; if (insertStmt.getTargetTable() != null) { table = insertStmt.getTargetTable(); } else { table = getTargetTable(insertStmt, session); } if (table instanceof OlapTable) { OlapTable olapTable = (OlapTable) table; List<Long> targetPartitionIds = Lists.newArrayList(); PartitionNames targetPartitionNames = insertStmt.getTargetPartitionNames(); if (insertStmt.isSpecifyPartitionNames()) { if (targetPartitionNames.getPartitionNames().isEmpty()) { throw new SemanticException("No partition specified in partition lists", targetPartitionNames.getPos()); } List<String> deduplicatePartitionNames = targetPartitionNames.getPartitionNames().stream().distinct().collect(Collectors.toList()); if (deduplicatePartitionNames.size() != targetPartitionNames.getPartitionNames().size()) { insertStmt.setTargetPartitionNames(new PartitionNames(targetPartitionNames.isTemp(), deduplicatePartitionNames, targetPartitionNames.getPartitionColNames(), targetPartitionNames.getPartitionColValues(), targetPartitionNames.getPos())); } for (String partitionName : deduplicatePartitionNames) { if (Strings.isNullOrEmpty(partitionName)) { throw new SemanticException("there are empty partition name", targetPartitionNames.getPos()); } Partition partition = olapTable.getPartition(partitionName, targetPartitionNames.isTemp()); if (partition == null) { throw new SemanticException("Unknown partition '%s' in table '%s'", partitionName, olapTable.getName(), targetPartitionNames.getPos()); } targetPartitionIds.add(partition.getId()); } } else if (insertStmt.isStaticKeyPartitionInsert()) { checkStaticKeyPartitionInsert(insertStmt, table, targetPartitionNames); } else { for (Partition partition : olapTable.getPartitions()) { targetPartitionIds.add(partition.getId()); } if (targetPartitionIds.isEmpty()) { throw new SemanticException("data cannot be inserted into table with empty partition." + "Use `SHOW PARTITIONS FROM %s` to see the currently partitions of this table. 
", olapTable.getName()); } } insertStmt.setTargetPartitionIds(targetPartitionIds); } if (table.isIcebergTable() || table.isHiveTable()) { if (table.isHiveTable() && table.isUnPartitioned() && HiveWriteUtils.isS3Url(table.getTableLocation()) && insertStmt.isOverwrite()) { throw new SemanticException("Unsupported insert overwrite hive unpartitioned table with s3 location"); } if (table.isHiveTable() && ((HiveTable) table).getHiveTableType() != HiveTable.HiveTableType.MANAGED_TABLE && !session.getSessionVariable().enableWriteHiveExternalTable()) { throw new SemanticException("Only support to write hive managed table, tableType: " + ((HiveTable) table).getHiveTableType()); } PartitionNames targetPartitionNames = insertStmt.getTargetPartitionNames(); List<String> tablePartitionColumnNames = table.getPartitionColumnNames(); if (insertStmt.getTargetColumnNames() != null) { for (String partitionColName : tablePartitionColumnNames) { if (!insertStmt.getTargetColumnNames().contains(partitionColName)) { throw new SemanticException("Must include partition column %s", partitionColName); } } } else if (insertStmt.isStaticKeyPartitionInsert()) { checkStaticKeyPartitionInsert(insertStmt, table, targetPartitionNames); } List<Column> partitionColumns = tablePartitionColumnNames.stream() .map(table::getColumn) .collect(Collectors.toList()); for (Column column : partitionColumns) { if (isUnSupportedPartitionColumnType(column.getType())) { throw new SemanticException("Unsupported partition column type [%s] for %s table sink", column.getType().canonicalName(), table.getType()); } } } List<Column> targetColumns; Set<String> mentionedColumns = Sets.newTreeSet(String.CASE_INSENSITIVE_ORDER); if (insertStmt.getTargetColumnNames() == null) { if (table instanceof OlapTable) { targetColumns = new ArrayList<>(((OlapTable) table).getBaseSchemaWithoutGeneratedColumn()); mentionedColumns = ((OlapTable) table).getBaseSchemaWithoutGeneratedColumn().stream() .map(Column::getName).collect(Collectors.toSet()); } else { targetColumns = new ArrayList<>(table.getBaseSchema()); mentionedColumns = table.getBaseSchema().stream().map(Column::getName).collect(Collectors.toSet()); } } else { targetColumns = new ArrayList<>(); Set<String> requiredKeyColumns = table.getBaseSchema().stream().filter(Column::isKey) .filter(c -> !c.isAutoIncrement()).map(c -> c.getName().toLowerCase()).collect(Collectors.toSet()); for (String colName : insertStmt.getTargetColumnNames()) { Column column = table.getColumn(colName); if (column == null) { throw new SemanticException("Unknown column '%s' in '%s'", colName, table.getName()); } if (column.isGeneratedColumn()) { throw new SemanticException("generated column '%s' can not be specified", colName); } if (!mentionedColumns.add(colName)) { throw new SemanticException("Column '%s' specified twice", colName); } requiredKeyColumns.remove(colName.toLowerCase()); targetColumns.add(column); } if (table.isOlapTable()) { OlapTable olapTable = (OlapTable) table; if (olapTable.getKeysType().equals(KeysType.PRIMARY_KEYS)) { if (!requiredKeyColumns.isEmpty()) { String missingKeyColumns = String.join(",", requiredKeyColumns); ErrorReport.reportSemanticException(ErrorCode.ERR_MISSING_KEY_COLUMNS, missingKeyColumns); } if (targetColumns.size() < olapTable.getBaseSchemaWithoutGeneratedColumn().size()) { insertStmt.setUsePartialUpdate(); } } } } if (!insertStmt.usePartialUpdate()) { for (Column column : table.getBaseSchema()) { Column.DefaultValueType defaultValueType = column.getDefaultValueType(); if (defaultValueType 
== Column.DefaultValueType.NULL && !column.isAllowNull() && !column.isAutoIncrement() && !column.isGeneratedColumn() && !mentionedColumns.contains(column.getName())) { StringBuilder msg = new StringBuilder(); for (String s : mentionedColumns) { msg.append(" ").append(s).append(" "); } throw new SemanticException("'%s' must be explicitly mentioned in column permutation: %s", column.getName(), msg.toString()); } } } int mentionedColumnSize = mentionedColumns.size(); if ((table.isIcebergTable() || table.isHiveTable()) && insertStmt.isStaticKeyPartitionInsert()) { mentionedColumnSize -= table.getPartitionColumnNames().size(); } if (query.getRelationFields().size() != mentionedColumnSize) { throw new SemanticException("Column count doesn't match value count"); } if (query instanceof ValuesRelation) { ValuesRelation valuesRelation = (ValuesRelation) query; for (List<Expr> row : valuesRelation.getRows()) { for (int columnIdx = 0; columnIdx < row.size(); ++columnIdx) { Column column = targetColumns.get(columnIdx); Column.DefaultValueType defaultValueType = column.getDefaultValueType(); if (row.get(columnIdx) instanceof DefaultValueExpr && defaultValueType == Column.DefaultValueType.NULL && !column.isAutoIncrement()) { throw new SemanticException("Column has no default value, column=%s", column.getName()); } AnalyzerUtils.verifyNoAggregateFunctions(row.get(columnIdx), "Values"); AnalyzerUtils.verifyNoWindowFunctions(row.get(columnIdx), "Values"); } } } insertStmt.setTargetTable(table); if (session.getDumpInfo() != null) { session.getDumpInfo().addTable(insertStmt.getTableName().getDb(), table); } }
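A note on the flattened body above: user-specified partition names are silently deduplicated with distinct(), while the static key-partition path (checkStaticKeyPartitionInsert, shown in the class bodies below) rejects duplicates outright via a frequency map. A minimal, self-contained sketch of both idioms, assuming plain String names rather than the real PartitionNames objects:

import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.function.Function;
import java.util.stream.Collectors;

public class PartitionNameSketch {
    public static void main(String[] args) {
        List<String> names = Arrays.asList("p1", "p2", "p1");

        // Lenient path: duplicates are dropped, mirroring the PARTITION (...) name-list handling.
        List<String> dedup = names.stream().distinct().collect(Collectors.toList());
        System.out.println(dedup); // prints [p1, p2]

        // Strict path: duplicates are an error, mirroring checkStaticKeyPartitionInsert.
        Map<String, Long> frequencies = names.stream()
                .collect(Collectors.groupingBy(Function.identity(), Collectors.counting()));
        Optional<Map.Entry<String, Long>> duplicate = frequencies.entrySet().stream()
                .filter(entry -> entry.getValue() > 1)
                .findFirst();
        duplicate.ifPresent(entry ->
                System.out.println("Found duplicate partition column name " + entry.getKey()));
    }
}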
class InsertAnalyzer { private static void checkStaticKeyPartitionInsert(InsertStmt insertStmt, Table table, PartitionNames targetPartitionNames) { List<String> partitionColNames = targetPartitionNames.getPartitionColNames(); List<Expr> partitionColValues = targetPartitionNames.getPartitionColValues(); List<String> tablePartitionColumnNames = table.getPartitionColumnNames(); Preconditions.checkState(partitionColNames.size() == partitionColValues.size(), "Partition column names size must be equal to the partition column values size. %d vs %d", partitionColNames.size(), partitionColValues.size()); if (tablePartitionColumnNames.size() > partitionColNames.size()) { throw new SemanticException("Must include all %d partition columns in the partition clause", tablePartitionColumnNames.size()); } if (tablePartitionColumnNames.size() < partitionColNames.size()) { throw new SemanticException("Only %d partition columns can be included in the partition clause", tablePartitionColumnNames.size()); } Map<String, Long> frequencies = partitionColNames.stream() .collect(Collectors.groupingBy(Function.identity(), Collectors.counting())); Optional<Map.Entry<String, Long>> duplicateKey = frequencies.entrySet().stream() .filter(entry -> entry.getValue() > 1).findFirst(); if (duplicateKey.isPresent()) { throw new SemanticException("Found duplicate partition column name %s", duplicateKey.get().getKey()); } for (int i = 0; i < partitionColNames.size(); i++) { String actualName = partitionColNames.get(i); if (!AnalyzerUtils.containsIgnoreCase(tablePartitionColumnNames, actualName)) { throw new SemanticException("Can't find partition column %s", actualName); } Expr partitionValue = partitionColValues.get(i); if (!partitionValue.isLiteral()) { throw new SemanticException("partition value should be literal expression"); } if (partitionValue instanceof NullLiteral) { throw new SemanticException("partition value can't be null"); } LiteralExpr literalExpr = (LiteralExpr) partitionValue; Column column = table.getColumn(actualName); try { Expr expr = LiteralExpr.create(literalExpr.getStringValue(), column.getType()); insertStmt.getTargetPartitionNames().getPartitionColValues().set(i, expr); } catch (AnalysisException e) { throw new SemanticException(e.getMessage()); } } } private static Table getTargetTable(InsertStmt insertStmt, ConnectContext session) { if (insertStmt.useTableFunctionAsTargetTable()) { return insertStmt.makeTableFunctionTable(session.getSessionVariable()); } else if (insertStmt.useBlackHoleTableAsTargetTable()) { return insertStmt.makeBlackHoleTable(); } MetaUtils.normalizationTableName(session, insertStmt.getTableName()); String catalogName = insertStmt.getTableName().getCatalog(); String dbName = insertStmt.getTableName().getDb(); String tableName = insertStmt.getTableName().getTbl(); try { MetaUtils.checkCatalogExistAndReport(catalogName); } catch (AnalysisException e) { ErrorReport.reportSemanticException(ErrorCode.ERR_BAD_CATALOG_ERROR, catalogName); } Table table = MetaUtils.getTable(catalogName, dbName, tableName); if (table instanceof MaterializedView && !insertStmt.isSystem()) { throw new SemanticException( "The data of '%s' cannot be inserted because '%s' is a materialized view," + "and the data of materialized view must be consistent with the base table.", insertStmt.getTableName().getTbl(), insertStmt.getTableName().getTbl()); } if (insertStmt.isOverwrite()) { if (!(table instanceof OlapTable) && !table.isIcebergTable() && !table.isHiveTable()) { throw unsupportedException("Only support 
insert overwrite olap/iceberg/hive table"); } if (table instanceof OlapTable && ((OlapTable) table).getState() != NORMAL) { String msg = String.format("table state is %s, please wait to insert overwrite until table state is normal", ((OlapTable) table).getState()); throw unsupportedException(msg); } } if (!table.supportInsert()) { if (table.isIcebergTable() || table.isHiveTable()) { throw unsupportedException(String.format("Only support insert into %s table with parquet file format", table.getType())); } throw unsupportedException("Only support insert into olap/mysql/iceberg/hive table"); } if ((table.isHiveTable() || table.isIcebergTable()) && CatalogMgr.isInternalCatalog(catalogName)) { throw unsupportedException(String.format("Doesn't support %s table sink in the internal catalog. " + "You need to use %s catalog.", table.getType(), table.getType())); } return table; } public static boolean isUnSupportedPartitionColumnType(Type type) { return type.isFloat() || type.isDecimalOfAnyVersion() || type.isDatetime(); } }
class InsertAnalyzer { private static void checkStaticKeyPartitionInsert(InsertStmt insertStmt, Table table, PartitionNames targetPartitionNames) { List<String> partitionColNames = targetPartitionNames.getPartitionColNames(); List<Expr> partitionColValues = targetPartitionNames.getPartitionColValues(); List<String> tablePartitionColumnNames = table.getPartitionColumnNames(); Preconditions.checkState(partitionColNames.size() == partitionColValues.size(), "Partition column names size must be equal to the partition column values size. %d vs %d", partitionColNames.size(), partitionColValues.size()); if (tablePartitionColumnNames.size() > partitionColNames.size()) { throw new SemanticException("Must include all %d partition columns in the partition clause", tablePartitionColumnNames.size()); } if (tablePartitionColumnNames.size() < partitionColNames.size()) { throw new SemanticException("Only %d partition columns can be included in the partition clause", tablePartitionColumnNames.size()); } Map<String, Long> frequencies = partitionColNames.stream() .collect(Collectors.groupingBy(Function.identity(), Collectors.counting())); Optional<Map.Entry<String, Long>> duplicateKey = frequencies.entrySet().stream() .filter(entry -> entry.getValue() > 1).findFirst(); if (duplicateKey.isPresent()) { throw new SemanticException("Found duplicate partition column name %s", duplicateKey.get().getKey()); } for (int i = 0; i < partitionColNames.size(); i++) { String actualName = partitionColNames.get(i); if (!AnalyzerUtils.containsIgnoreCase(tablePartitionColumnNames, actualName)) { throw new SemanticException("Can't find partition column %s", actualName); } Expr partitionValue = partitionColValues.get(i); if (!partitionValue.isLiteral()) { throw new SemanticException("partition value should be literal expression"); } LiteralExpr literalExpr = (LiteralExpr) partitionValue; Column column = table.getColumn(actualName); try { Type type = literalExpr.isConstantNull() ? 
Type.NULL : column.getType(); Expr expr = LiteralExpr.create(literalExpr.getStringValue(), type); insertStmt.getTargetPartitionNames().getPartitionColValues().set(i, expr); } catch (AnalysisException e) { throw new SemanticException(e.getMessage()); } } } private static Table getTargetTable(InsertStmt insertStmt, ConnectContext session) { if (insertStmt.useTableFunctionAsTargetTable()) { return insertStmt.makeTableFunctionTable(session.getSessionVariable()); } else if (insertStmt.useBlackHoleTableAsTargetTable()) { return insertStmt.makeBlackHoleTable(); } MetaUtils.normalizationTableName(session, insertStmt.getTableName()); String catalogName = insertStmt.getTableName().getCatalog(); String dbName = insertStmt.getTableName().getDb(); String tableName = insertStmt.getTableName().getTbl(); try { MetaUtils.checkCatalogExistAndReport(catalogName); } catch (AnalysisException e) { ErrorReport.reportSemanticException(ErrorCode.ERR_BAD_CATALOG_ERROR, catalogName); } Table table = MetaUtils.getTable(catalogName, dbName, tableName); if (table instanceof MaterializedView && !insertStmt.isSystem()) { throw new SemanticException( "The data of '%s' cannot be inserted because '%s' is a materialized view," + "and the data of materialized view must be consistent with the base table.", insertStmt.getTableName().getTbl(), insertStmt.getTableName().getTbl()); } if (insertStmt.isOverwrite()) { if (!(table instanceof OlapTable) && !table.isIcebergTable() && !table.isHiveTable()) { throw unsupportedException("Only support insert overwrite olap/iceberg/hive table"); } if (table instanceof OlapTable && ((OlapTable) table).getState() != NORMAL) { String msg = String.format("table state is %s, please wait to insert overwrite until table state is normal", ((OlapTable) table).getState()); throw unsupportedException(msg); } } if (!table.supportInsert()) { if (table.isIcebergTable() || table.isHiveTable()) { throw unsupportedException(String.format("Only support insert into %s table with parquet file format", table.getType())); } throw unsupportedException("Only support insert into olap/mysql/iceberg/hive table"); } if ((table.isHiveTable() || table.isIcebergTable()) && CatalogMgr.isInternalCatalog(catalogName)) { throw unsupportedException(String.format("Doesn't support %s table sink in the internal catalog. " + "You need to use %s catalog.", table.getType(), table.getType())); } return table; } public static boolean isUnSupportedPartitionColumnType(Type type) { return type.isFloat() || type.isDecimalOfAnyVersion() || type.isDatetime(); } }
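The substantive diff between the two InsertAnalyzer bodies above is the NULL handling in checkStaticKeyPartitionInsert: the earlier version rejects a NullLiteral partition value outright, while the later one re-creates a constant-null literal with the NULL type instead of the partition column's type. A minimal sketch of that control-flow change, using a hypothetical stand-in type rather than the real LiteralExpr/Type classes:

public class NullPartitionValueSketch {
    // Hypothetical stand-in for the catalog's type enum; not the real Type class.
    enum SketchType { NULL, DATE }

    // Before: any constant-null partition value is an error.
    static SketchType before(boolean isConstantNull, SketchType columnType) {
        if (isConstantNull) {
            throw new IllegalArgumentException("partition value can't be null");
        }
        return columnType;
    }

    // After: a constant-null literal is re-created with the NULL type;
    // everything else keeps the partition column's type.
    static SketchType after(boolean isConstantNull, SketchType columnType) {
        return isConstantNull ? SketchType.NULL : columnType;
    }

    public static void main(String[] args) {
        System.out.println(after(true, SketchType.DATE));  // NULL
        System.out.println(after(false, SketchType.DATE)); // DATE
    }
}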
Please perform the doc validation right after the annotation validation; otherwise it never runs for native functions, since the method returns early for them.
public void visit(BLangFunction funcNode) { SymbolEnv funcEnv = SymbolEnv.createFunctionEnv(funcNode, funcNode.symbol.scope, env); funcNode.annAttachments.forEach(annotationAttachment -> { annotationAttachment.attachmentPoint = new BLangAnnotationAttachmentPoint(BLangAnnotationAttachmentPoint.AttachmentPoint.FUNCTION, null); this.analyzeDef(annotationAttachment, funcEnv); }); if (Symbols.isNative(funcNode.symbol)) { return; } analyzeStmt(funcNode.body, funcEnv); this.processWorkers(funcNode, funcEnv); funcNode.docAttachments.forEach(doc -> analyzeDef(doc, funcEnv)); }
funcNode.docAttachments.forEach(doc -> analyzeDef(doc, funcEnv));
public void visit(BLangFunction funcNode) { SymbolEnv funcEnv = SymbolEnv.createFunctionEnv(funcNode, funcNode.symbol.scope, env); funcNode.annAttachments.forEach(annotationAttachment -> { annotationAttachment.attachmentPoint = new BLangAnnotationAttachmentPoint(BLangAnnotationAttachmentPoint.AttachmentPoint.FUNCTION, null); this.analyzeDef(annotationAttachment, funcEnv); }); funcNode.docAttachments.forEach(doc -> analyzeDef(doc, funcEnv)); if (Symbols.isNative(funcNode.symbol)) { return; } analyzeStmt(funcNode.body, funcEnv); this.processWorkers(funcNode, funcEnv); }
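What the reordering buys: visit() returns early for native functions, so any validation placed after that guard silently never runs for them. Moving the docAttachments loop above the guard, right after annotation validation, makes doc validation unconditional. A stripped-down sketch of the pitfall, with hypothetical names standing in for the compiler's API:

import java.util.Arrays;
import java.util.List;

public class EarlyReturnSketch {
    // Before: the doc loop sits below the early return and is skipped for native functions.
    static void visitBefore(boolean isNative, List<String> docs) {
        if (isNative) {
            return; // doc validation below never runs for native functions
        }
        docs.forEach(doc -> System.out.println("validated " + doc));
    }

    // After: docs are validated before the guard, so the check always runs.
    static void visitAfter(boolean isNative, List<String> docs) {
        docs.forEach(doc -> System.out.println("validated " + doc));
        if (isNative) {
            return; // body and worker analysis are still skipped for native functions
        }
    }

    public static void main(String[] args) {
        List<String> docs = Arrays.asList("@param x");
        visitBefore(true, docs); // prints nothing
        visitAfter(true, docs);  // prints "validated @param x"
    }
}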
class SemanticAnalyzer extends BLangNodeVisitor { private static final CompilerContext.Key<SemanticAnalyzer> SYMBOL_ANALYZER_KEY = new CompilerContext.Key<>(); private SymbolTable symTable; private SymbolEnter symbolEnter; private Names names; private SymbolResolver symResolver; private TypeChecker typeChecker; private Types types; private DiagnosticLog dlog; private SymbolEnv env; private BType expType; private DiagnosticCode diagCode; private BType resType; public static SemanticAnalyzer getInstance(CompilerContext context) { SemanticAnalyzer semAnalyzer = context.get(SYMBOL_ANALYZER_KEY); if (semAnalyzer == null) { semAnalyzer = new SemanticAnalyzer(context); } return semAnalyzer; } public SemanticAnalyzer(CompilerContext context) { context.put(SYMBOL_ANALYZER_KEY, this); this.symTable = SymbolTable.getInstance(context); this.symbolEnter = SymbolEnter.getInstance(context); this.names = Names.getInstance(context); this.symResolver = SymbolResolver.getInstance(context); this.typeChecker = TypeChecker.getInstance(context); this.types = Types.getInstance(context); this.dlog = DiagnosticLog.getInstance(context); } public BLangPackage analyze(BLangPackage pkgNode) { pkgNode.accept(this); return pkgNode; } public void visit(BLangPackage pkgNode) { if (pkgNode.completedPhases.contains(CompilerPhase.TYPE_CHECK)) { return; } SymbolEnv pkgEnv = symbolEnter.packageEnvs.get(pkgNode.symbol); pkgNode.imports.forEach(importNode -> analyzeDef(importNode, pkgEnv)); pkgNode.topLevelNodes.forEach(topLevelNode -> analyzeDef((BLangNode) topLevelNode, pkgEnv)); analyzeDef(pkgNode.initFunction, pkgEnv); pkgNode.completedPhases.add(CompilerPhase.TYPE_CHECK); } public void visit(BLangImportPackage importPkgNode) { BPackageSymbol pkgSymbol = importPkgNode.symbol; SymbolEnv pkgEnv = symbolEnter.packageEnvs.get(pkgSymbol); if (pkgEnv == null) { return; } analyzeDef(pkgEnv.node, pkgEnv); } public void visit(BLangXMLNS xmlnsNode) { xmlnsNode.type = symTable.stringType; if (xmlnsNode.symbol != null) { return; } symbolEnter.defineNode(xmlnsNode, env); typeChecker.checkExpr(xmlnsNode.namespaceURI, env, Lists.of(symTable.stringType)); } public void visit(BLangXMLNSStatement xmlnsStmtNode) { analyzeNode(xmlnsStmtNode.xmlnsDecl, env); } private void processWorkers(BLangInvokableNode invNode, SymbolEnv invEnv) { if (invNode.workers.size() > 0) { invEnv.scope.entries.putAll(invNode.body.scope.entries); invNode.workers.forEach(e -> this.symbolEnter.defineNode(e, invEnv)); invNode.workers.forEach(e -> analyzeNode(e, invEnv)); } } public void visit(BLangStruct structNode) { BSymbol structSymbol = structNode.symbol; SymbolEnv structEnv = SymbolEnv.createPkgLevelSymbolEnv(structNode, structSymbol.scope, env); structNode.fields.forEach(field -> analyzeDef(field, structEnv)); structNode.annAttachments.forEach(annotationAttachment -> { annotationAttachment.attachmentPoint = new BLangAnnotationAttachmentPoint(BLangAnnotationAttachmentPoint.AttachmentPoint.STRUCT, null); annotationAttachment.accept(this); }); structNode.docAttachments.forEach(doc -> analyzeDef(doc, structEnv)); } @Override public void visit(BLangEnum enumNode) { enumNode.annAttachments.forEach(annotationAttachment -> { annotationAttachment.attachmentPoint = new BLangAnnotationAttachmentPoint( BLangAnnotationAttachmentPoint.AttachmentPoint.ENUM, null); annotationAttachment.accept(this); }); BSymbol enumSymbol = enumNode.symbol; SymbolEnv structEnv = SymbolEnv.createPkgLevelSymbolEnv(enumNode, enumSymbol.scope, env); enumNode.docAttachments.forEach(doc -> 
analyzeDef(doc, structEnv)); } @Override public void visit(BLangDocumentation docNode) { List<BLangIdentifier> tempAttributes = new ArrayList<>(); for (BLangDocumentationAttribute attribute : docNode.attributes) { Optional<BLangIdentifier> matchingAttribute = tempAttributes .stream() .filter(identifier -> identifier.equals(attribute.documentationField)) .findAny(); BSymbol owner = this.env.scope.owner; String qualifiedName = owner.pkgID == null || owner.pkgID == PackageID.DEFAULT || owner.pkgID.name == Names.BUILTIN_PACKAGE ? owner.name.getValue() : (owner.pkgID + ":" + owner.name); String construct = this.env.node.getKind().name().toLowerCase() + " \'" + qualifiedName + "\'"; if (matchingAttribute.isPresent()) { this.dlog.warning(attribute.pos, DiagnosticCode.DUPLICATE_DOCUMENTED_ATTRIBUTE, attribute.documentationField, construct); } tempAttributes.add(attribute.documentationField); BSymbol attributeSymbol; Name attributeName = names.fromIdNode(attribute.documentationField); int ownerSymTag = env.scope.owner.tag; if ((ownerSymTag & SymTag.ANNOTATION) == SymTag.ANNOTATION) { attributeSymbol = this.env.scope.lookup(attributeName).symbol; if (attributeSymbol == null || attributeSymbol.tag != SymTag.ANNOTATION_ATTRIBUTE) { this.dlog.warning(attribute.pos, DiagnosticCode.NO_SUCH_DOCUMENTABLE_ATTRIBUTE, attribute.documentationField, construct); continue; } } else { attributeSymbol = this.env.scope.lookup(attributeName).symbol; if (attributeSymbol == null || attributeSymbol.tag != SymTag.VARIABLE) { this.dlog.warning(attribute.pos, DiagnosticCode.NO_SUCH_DOCUMENTABLE_ATTRIBUTE, attribute.documentationField, construct); continue; } } attribute.type = attributeSymbol.type; } } public void visit(BLangAnnotation annotationNode) { SymbolEnv annotationEnv = SymbolEnv.createAnnotationEnv(annotationNode, annotationNode.symbol.scope, env); annotationNode.attributes.forEach(attribute -> { analyzeNode(attribute, annotationEnv); }); annotationNode.attachmentPoints.forEach(point -> { if (point.pkgAlias != null) { BSymbol pkgSymbol = symResolver.resolvePkgSymbol(annotationNode.pos, annotationEnv, names.fromIdNode(point.pkgAlias)); if (pkgSymbol == symTable.notFoundSymbol) { return; } point.pkgPath = pkgSymbol.pkgID.name.getValue(); } }); annotationNode.annAttachments.forEach(annotationAttachment -> { annotationAttachment.attachmentPoint = new BLangAnnotationAttachmentPoint(BLangAnnotationAttachmentPoint.AttachmentPoint.ANNOTATION, null); annotationAttachment.accept(this); }); annotationNode.docAttachments.forEach(doc -> analyzeDef(doc, annotationEnv)); } public void visit(BLangAnnotAttribute annotationAttribute) { if (annotationAttribute.expr != null) { BType actualType = this.typeChecker.checkExpr(annotationAttribute.expr, env, Lists.of(annotationAttribute.symbol.type), DiagnosticCode.INVALID_OPERATION_INCOMPATIBLE_TYPES).get(0); if (!(this.types.isValueType(annotationAttribute.symbol.type) && this.types.isValueType(actualType))) { this.dlog.error(annotationAttribute.pos, DiagnosticCode.INVALID_DEFAULT_VALUE); } } else { if (!this.types.isAnnotationFieldType(annotationAttribute.symbol.type)) { this.dlog.error(annotationAttribute.pos, DiagnosticCode.INVALID_ATTRIBUTE_TYPE, annotationAttribute.symbol.type); } } } public void visit(BLangAnnotationAttachment annAttachmentNode) { BSymbol symbol = this.symResolver.resolveAnnotation(annAttachmentNode.pos, env, names.fromString(annAttachmentNode.pkgAlias.getValue()), names.fromString(annAttachmentNode.getAnnotationName().getValue())); if (symbol == 
this.symTable.notFoundSymbol) { this.dlog.error(annAttachmentNode.pos, DiagnosticCode.UNDEFINED_ANNOTATION, annAttachmentNode.getAnnotationName().getValue()); return; } BAnnotationSymbol annotationSymbol = (BAnnotationSymbol) symbol; annAttachmentNode.annotationSymbol = annotationSymbol; if (annotationSymbol.getAttachmentPoints() != null && annotationSymbol.getAttachmentPoints().size() > 0) { BLangAnnotationAttachmentPoint[] attachmentPointsArrray = new BLangAnnotationAttachmentPoint[annotationSymbol.getAttachmentPoints().size()]; Optional<BLangAnnotationAttachmentPoint> matchingAttachmentPoint = Arrays .stream(annotationSymbol.getAttachmentPoints().toArray(attachmentPointsArrray)) .filter(attachmentPoint -> attachmentPoint.equals(annAttachmentNode.attachmentPoint)) .findAny(); if (!matchingAttachmentPoint.isPresent()) { String msg = annAttachmentNode.attachmentPoint.getAttachmentPoint().getValue(); if (annAttachmentNode.attachmentPoint.getPkgPath() != null) { msg = annAttachmentNode.attachmentPoint.getAttachmentPoint().getValue() + "<" + annAttachmentNode.attachmentPoint.getPkgPath() + ">"; } this.dlog.error(annAttachmentNode.pos, DiagnosticCode.ANNOTATION_NOT_ALLOWED, annotationSymbol, msg); } } validateAttributes(annAttachmentNode, annotationSymbol); populateDefaultValues(annAttachmentNode, annotationSymbol); } private void validateAttributes(BLangAnnotationAttachment annAttachmentNode, BAnnotationSymbol annotationSymbol) { annAttachmentNode.attributes.forEach(annotAttachmentAttribute -> { Name attributeName = names.fromIdNode((BLangIdentifier) annotAttachmentAttribute.getName()); BAnnotationAttributeSymbol attributeSymbol = (BAnnotationAttributeSymbol) annotationSymbol.scope.lookup(attributeName).symbol; if (attributeSymbol == null) { this.dlog.error(annAttachmentNode.pos, DiagnosticCode.NO_SUCH_ATTRIBUTE, annotAttachmentAttribute.getName(), annotationSymbol.name); return; } if (annotAttachmentAttribute.value.value != null && annotAttachmentAttribute.value.value instanceof BLangExpression) { BType resolvedType = this.typeChecker.checkExpr((BLangExpression) annotAttachmentAttribute.value.value, env, Lists.of(attributeSymbol.type), DiagnosticCode.INCOMPATIBLE_TYPES).get(0); if (resolvedType == symTable.errType) { return; } if (annotAttachmentAttribute.value.value instanceof BLangSimpleVarRef && ((BLangSimpleVarRef) annotAttachmentAttribute.value.value).symbol.flags != Flags.CONST) { this.dlog.error(annAttachmentNode.pos, DiagnosticCode.ATTRIBUTE_VAL_CANNOT_REFER_NON_CONST); } return; } else { if (attributeSymbol.type.tag == TypeTags.ARRAY) { if (annotAttachmentAttribute.value.value != null) { if (annotAttachmentAttribute.value.value instanceof BLangExpression) { this.typeChecker.checkExpr((BLangExpression) annotAttachmentAttribute.value.value, env, Lists.of(attributeSymbol.type), DiagnosticCode.INCOMPATIBLE_TYPES); } else { BLangAnnotationAttachment childAttachment = (BLangAnnotationAttachment) annotAttachmentAttribute.value.value; BSymbol symbol = this.symResolver.resolveAnnotation(childAttachment.pos, env, names.fromString(childAttachment.pkgAlias.getValue()), names.fromString(childAttachment.getAnnotationName().getValue())); if (symbol == this.symTable.notFoundSymbol) { this.dlog.error(childAttachment.pos, DiagnosticCode.UNDEFINED_ANNOTATION, childAttachment.getAnnotationName().getValue()); return; } childAttachment.type = symbol.type; this.types.checkType(childAttachment.pos, childAttachment.type, attributeSymbol.type, DiagnosticCode.INCOMPATIBLE_TYPES); } } 
annotAttachmentAttribute.value.arrayValues.forEach(value -> { if (value.value instanceof BLangAnnotationAttachment) { BLangAnnotationAttachment childAttachment = (BLangAnnotationAttachment) value.value; if (childAttachment != null) { BSymbol symbol = this.symResolver.resolveAnnotation(childAttachment.pos, env, names.fromString(childAttachment.pkgAlias.getValue()), names.fromString(childAttachment.getAnnotationName().getValue())); if (symbol == this.symTable.notFoundSymbol) { this.dlog.error(annAttachmentNode.pos, DiagnosticCode.UNDEFINED_ANNOTATION, childAttachment.getAnnotationName().getValue()); return; } childAttachment.type = symbol.type; childAttachment.annotationSymbol = (BAnnotationSymbol) symbol; this.types.checkType(childAttachment.pos, childAttachment.type, ((BArrayType) attributeSymbol.type).eType, DiagnosticCode.INCOMPATIBLE_TYPES); validateAttributes(childAttachment, (BAnnotationSymbol) symbol); } } else { this.typeChecker.checkExpr((BLangExpression) value.value, env, Lists.of(((BArrayType) attributeSymbol.type).eType), DiagnosticCode.INCOMPATIBLE_TYPES); } }); } else { if (annotAttachmentAttribute.value.value == null) { this.dlog.error(annAttachmentNode.pos, DiagnosticCode.INCOMPATIBLE_TYPES_ARRAY_FOUND, attributeSymbol.type); } BLangAnnotationAttachment childAttachment = (BLangAnnotationAttachment) annotAttachmentAttribute.value.value; if (childAttachment != null) { BSymbol symbol = this.symResolver.resolveAnnotation(childAttachment.pos, env, names.fromString(childAttachment.pkgAlias.getValue()), names.fromString(childAttachment.getAnnotationName().getValue())); if (symbol == this.symTable.notFoundSymbol) { this.dlog.error(annAttachmentNode.pos, DiagnosticCode.UNDEFINED_ANNOTATION, childAttachment.getAnnotationName().getValue()); return; } childAttachment.type = symbol.type; childAttachment.annotationSymbol = (BAnnotationSymbol) symbol; this.types.checkType(childAttachment.pos, childAttachment.type, attributeSymbol.type, DiagnosticCode.INCOMPATIBLE_TYPES); validateAttributes(childAttachment, (BAnnotationSymbol) symbol); } } } }); } private void populateDefaultValues(BLangAnnotationAttachment annAttachmentNode, BAnnotationSymbol annotationSymbol) { for (BAnnotationAttributeSymbol defAttribute : annotationSymbol.attributes) { BLangAnnotAttachmentAttribute[] attributeArrray = new BLangAnnotAttachmentAttribute[annAttachmentNode.getAttributes().size()]; Optional<BLangAnnotAttachmentAttribute> matchingAttribute = Arrays .stream(annAttachmentNode.getAttributes().toArray(attributeArrray)) .filter(attribute -> attribute.name.value.equals(defAttribute.name.getValue())) .findAny(); if (!matchingAttribute.isPresent()) { if (defAttribute.expr != null) { BLangAnnotAttachmentAttributeValue value = new BLangAnnotAttachmentAttributeValue(); value.value = defAttribute.expr; BLangIdentifier name = (BLangIdentifier) TreeBuilder.createIdentifierNode(); name.value = defAttribute.name.getValue(); BLangAnnotAttachmentAttribute attribute = new BLangAnnotAttachmentAttribute(name, value); annAttachmentNode.addAttribute(attribute); } continue; } if (matchingAttribute.get().value.value != null && !(matchingAttribute.get().value.value instanceof BLangAnnotationAttachment)) { continue; } if (matchingAttribute.get().value.arrayValues.size() > 0) { for (BLangAnnotAttachmentAttributeValue attr : matchingAttribute.get().value.arrayValues) { if (attr.value != null && !(attr.value instanceof BLangAnnotationAttachment)) { continue; } BLangAnnotationAttachment attachment = (BLangAnnotationAttachment) attr.value; 
if (attachment != null) { BSymbol symbol = this.symResolver.resolveAnnotation(attachment.pos, env, names.fromString(attachment.pkgAlias.getValue()), names.fromString(attachment.getAnnotationName().getValue())); attachment.annotationSymbol = (BAnnotationSymbol) symbol; if (symbol == this.symTable.notFoundSymbol) { this.dlog.error(annAttachmentNode.pos, DiagnosticCode.UNDEFINED_ANNOTATION, attachment.getAnnotationName().getValue()); return; } populateDefaultValues(attachment, (BAnnotationSymbol) symbol); } } } else { BLangAnnotationAttachment attachment = (BLangAnnotationAttachment) matchingAttribute.get().value.value; if (attachment != null) { BSymbol symbol = this.symResolver.resolveAnnotation(attachment.pos, env, names.fromString(attachment.pkgAlias.getValue()), names.fromString(attachment.getAnnotationName().getValue())); attachment.annotationSymbol = (BAnnotationSymbol) symbol; if (symbol == this.symTable.notFoundSymbol) { this.dlog.error(annAttachmentNode.pos, DiagnosticCode.UNDEFINED_ANNOTATION, attachment.getAnnotationName().getValue()); return; } populateDefaultValues(attachment, (BAnnotationSymbol) symbol); } } } } public void visit(BLangVariable varNode) { int ownerSymTag = env.scope.owner.tag; if ((ownerSymTag & SymTag.INVOKABLE) == SymTag.INVOKABLE) { if (varNode.symbol == null) { symbolEnter.defineNode(varNode, env); } } if (varNode.expr != null) { SymbolEnv varInitEnv = SymbolEnv.createVarInitEnv(varNode, env, varNode.symbol); if ((ownerSymTag & SymTag.PACKAGE) != SymTag.PACKAGE && (ownerSymTag & SymTag.SERVICE) != SymTag.SERVICE && (ownerSymTag & SymTag.CONNECTOR) != SymTag.CONNECTOR) { typeChecker.checkExpr(varNode.expr, varInitEnv, Lists.of(varNode.symbol.type)); } if (varNode.symbol.flags == Flags.CONST) { varNode.annAttachments.forEach(a -> { a.attachmentPoint = new BLangAnnotationAttachmentPoint(BLangAnnotationAttachmentPoint.AttachmentPoint.CONST, null); this.analyzeDef(a, varInitEnv); }); } } varNode.docAttachments.forEach(doc -> { doc.accept(this); }); varNode.type = varNode.symbol.type; } public void visit(BLangBlockStmt blockNode) { SymbolEnv blockEnv = SymbolEnv.createBlockEnv(blockNode, env); blockNode.stmts.forEach(stmt -> analyzeStmt(stmt, blockEnv)); } public void visit(BLangVariableDef varDefNode) { analyzeDef(varDefNode.var, env); } public void visit(BLangAssignment assignNode) { if (assignNode.isDeclaredWithVar()) { handleAssignNodeWithVar(assignNode); return; } List<BType> expTypes = new ArrayList<>(); for (BLangExpression expr : assignNode.varRefs) { if (expr.getKind() != NodeKind.SIMPLE_VARIABLE_REF && expr.getKind() != NodeKind.INDEX_BASED_ACCESS_EXPR && expr.getKind() != NodeKind.FIELD_BASED_ACCESS_EXPR && expr.getKind() != NodeKind.XML_ATTRIBUTE_ACCESS_EXPR) { dlog.error(expr.pos, DiagnosticCode.INVALID_VARIABLE_ASSIGNMENT, expr); expTypes.add(symTable.errType); continue; } BLangVariableReference varRef = (BLangVariableReference) expr; varRef.lhsVar = true; typeChecker.checkExpr(varRef, env).get(0); if (varRef.getKind() == NodeKind.FIELD_BASED_ACCESS_EXPR && ((BLangFieldBasedAccess) varRef).expr.type.tag == TypeTags.ENUM) { dlog.error(varRef.pos, DiagnosticCode.INVALID_VARIABLE_ASSIGNMENT, varRef); expTypes.add(symTable.errType); continue; } expTypes.add(varRef.type); checkConstantAssignment(varRef); } typeChecker.checkExpr(assignNode.expr, this.env, expTypes); } public void visit(BLangBind bindNode) { List<BType> expTypes = new ArrayList<>(); BLangExpression varRef = bindNode.varRef; ((BLangVariableReference) varRef).lhsVar = true; 
expTypes.add(typeChecker.checkExpr(varRef, env).get(0)); checkConstantAssignment(varRef); typeChecker.checkExpr(bindNode.expr, this.env, expTypes); } private void checkConstantAssignment(BLangExpression varRef) { if (varRef.type == symTable.errType) { return; } if (varRef.getKind() != NodeKind.SIMPLE_VARIABLE_REF) { return; } BLangSimpleVarRef simpleVarRef = (BLangSimpleVarRef) varRef; if (simpleVarRef.pkgSymbol != null && simpleVarRef.pkgSymbol.tag == SymTag.XMLNS) { dlog.error(varRef.pos, DiagnosticCode.XML_QNAME_UPDATE_NOT_ALLOWED); return; } Name varName = names.fromIdNode(simpleVarRef.variableName); if (!Names.IGNORE.equals(varName) && simpleVarRef.symbol.flags == Flags.CONST && env.enclInvokable != env.enclPkg.initFunction) { dlog.error(varRef.pos, DiagnosticCode.CANNOT_ASSIGN_VALUE_CONSTANT, varRef); } } public void visit(BLangExpressionStmt exprStmtNode) { SymbolEnv stmtEnv = new SymbolEnv(exprStmtNode, this.env.scope); this.env.copyTo(stmtEnv); List<BType> bTypes = typeChecker.checkExpr(exprStmtNode.expr, stmtEnv, new ArrayList<>()); if (bTypes.size() > 0 && !(bTypes.size() == 1 && bTypes.get(0) == symTable.errType)) { dlog.error(exprStmtNode.pos, DiagnosticCode.ASSIGNMENT_REQUIRED); } } public void visit(BLangIf ifNode) { typeChecker.checkExpr(ifNode.expr, env, Lists.of(symTable.booleanType)); analyzeStmt(ifNode.body, env); if (ifNode.elseStmt != null) { analyzeStmt(ifNode.elseStmt, env); } } public void visit(BLangForeach foreach) { typeChecker.checkExpr(foreach.collection, env); foreach.varTypes = types.checkForeachTypes(foreach.collection, foreach.varRefs.size()); SymbolEnv blockEnv = SymbolEnv.createBlockEnv(foreach.body, env); handleForeachVariables(foreach, foreach.varTypes, blockEnv); analyzeStmt(foreach.body, blockEnv); } public void visit(BLangWhile whileNode) { typeChecker.checkExpr(whileNode.expr, env, Lists.of(symTable.booleanType)); analyzeStmt(whileNode.body, env); } @Override public void visit(BLangLock lockNode) { analyzeStmt(lockNode.body, env); } public void visit(BLangConnector connectorNode) { BSymbol connectorSymbol = connectorNode.symbol; SymbolEnv connectorEnv = SymbolEnv.createConnectorEnv(connectorNode, connectorSymbol.scope, env); connectorNode.annAttachments.forEach(a -> { a.attachmentPoint = new BLangAnnotationAttachmentPoint(BLangAnnotationAttachmentPoint.AttachmentPoint.CONNECTOR, null); this.analyzeDef(a, connectorEnv); }); connectorNode.params.forEach(param -> this.analyzeDef(param, connectorEnv)); connectorNode.varDefs.forEach(varDef -> this.analyzeDef(varDef, connectorEnv)); this.analyzeDef(connectorNode.initFunction, connectorEnv); connectorNode.actions.forEach(action -> this.analyzeDef(action, connectorEnv)); this.analyzeDef(connectorNode.initAction, connectorEnv); connectorNode.docAttachments.forEach(doc -> analyzeDef(doc, connectorEnv)); } public void visit(BLangAction actionNode) { BSymbol actionSymbol = actionNode.symbol; SymbolEnv actionEnv = SymbolEnv.createResourceActionSymbolEnv(actionNode, actionSymbol.scope, env); actionNode.annAttachments.forEach(a -> { a.attachmentPoint = new BLangAnnotationAttachmentPoint(BLangAnnotationAttachmentPoint.AttachmentPoint.ACTION, null); this.analyzeDef(a, actionEnv); }); if (Symbols.isNative(actionSymbol)) { return; } actionNode.params.forEach(p -> this.analyzeDef(p, actionEnv)); analyzeStmt(actionNode.body, actionEnv); this.processWorkers(actionNode, actionEnv); actionNode.docAttachments.forEach(doc -> analyzeDef(doc, actionEnv)); } public void visit(BLangService serviceNode) { BSymbol serviceSymbol = 
serviceNode.symbol; SymbolEnv serviceEnv = SymbolEnv.createPkgLevelSymbolEnv(serviceNode, serviceSymbol.scope, env); BSymbol protocolPkg = symResolver.resolvePkgSymbol(serviceNode.pos, serviceEnv, names.fromIdNode(serviceNode.protocolPkgIdentifier)); ((BTypeSymbol) serviceSymbol).protocolPkgId = protocolPkg.pkgID; serviceNode.annAttachments.forEach(a -> { a.attachmentPoint = new BLangAnnotationAttachmentPoint(BLangAnnotationAttachmentPoint.AttachmentPoint.SERVICE, protocolPkg.pkgID.name.getValue()); this.analyzeDef(a, serviceEnv); }); serviceNode.vars.forEach(v -> this.analyzeDef(v, serviceEnv)); this.analyzeDef(serviceNode.initFunction, serviceEnv); serviceNode.resources.forEach(r -> this.analyzeDef(r, serviceEnv)); serviceNode.docAttachments.forEach(doc -> analyzeDef(doc, serviceEnv)); } public void visit(BLangResource resourceNode) { BSymbol resourceSymbol = resourceNode.symbol; SymbolEnv resourceEnv = SymbolEnv.createResourceActionSymbolEnv(resourceNode, resourceSymbol.scope, env); resourceNode.annAttachments.forEach(a -> { a.attachmentPoint = new BLangAnnotationAttachmentPoint(BLangAnnotationAttachmentPoint.AttachmentPoint.RESOURCE, null); this.analyzeDef(a, resourceEnv); }); resourceNode.params.forEach(p -> this.analyzeDef(p, resourceEnv)); analyzeStmt(resourceNode.body, resourceEnv); this.processWorkers(resourceNode, resourceEnv); resourceNode.docAttachments.forEach(doc -> analyzeDef(doc, resourceEnv)); } public void visit(BLangTryCatchFinally tryCatchFinally) { analyzeStmt(tryCatchFinally.tryBody, env); tryCatchFinally.catchBlocks.forEach(c -> analyzeNode(c, env)); if (tryCatchFinally.finallyBody != null) { analyzeStmt(tryCatchFinally.finallyBody, env); } } public void visit(BLangCatch bLangCatch) { SymbolEnv catchBlockEnv = SymbolEnv.createBlockEnv(bLangCatch.body, env); analyzeNode(bLangCatch.param, catchBlockEnv); if (!this.types.checkStructEquivalency(bLangCatch.param.type, symTable.errStructType)) { dlog.error(bLangCatch.param.pos, DiagnosticCode.INCOMPATIBLE_TYPES, symTable.errStructType, bLangCatch.param.type); } analyzeStmt(bLangCatch.body, catchBlockEnv); } @Override public void visit(BLangTransaction transactionNode) { analyzeStmt(transactionNode.transactionBody, env); if (transactionNode.failedBody != null) { analyzeStmt(transactionNode.failedBody, env); } if (transactionNode.retryCount != null) { typeChecker.checkExpr(transactionNode.retryCount, env, Lists.of(symTable.intType)); checkRetryStmtValidity(transactionNode.retryCount); } } @Override public void visit(BLangAbort abortNode) { } private boolean isJoinResultType(BLangVariable var) { BLangType type = var.typeNode; if (type instanceof BuiltInReferenceTypeNode) { return ((BuiltInReferenceTypeNode) type).getTypeKind() == TypeKind.MAP; } return false; } private BLangVariableDef createVarDef(BLangVariable var) { BLangVariableDef varDefNode = new BLangVariableDef(); varDefNode.var = var; varDefNode.pos = var.pos; return varDefNode; } private BLangBlockStmt generateCodeBlock(StatementNode... 
statements) { BLangBlockStmt block = new BLangBlockStmt(); for (StatementNode stmt : statements) { block.addStatement(stmt); } return block; } @Override public void visit(BLangForkJoin forkJoin) { SymbolEnv forkJoinEnv = SymbolEnv.createFolkJoinEnv(forkJoin, this.env); forkJoin.workers.forEach(e -> this.symbolEnter.defineNode(e, forkJoinEnv)); forkJoin.workers.forEach(e -> this.analyzeDef(e, forkJoinEnv)); if (!this.isJoinResultType(forkJoin.joinResultVar)) { this.dlog.error(forkJoin.joinResultVar.pos, DiagnosticCode.INVALID_WORKER_JOIN_RESULT_TYPE); } /* create code block and environment for join result section, i.e. (map results) */ BLangBlockStmt joinResultsBlock = this.generateCodeBlock(this.createVarDef(forkJoin.joinResultVar)); SymbolEnv joinResultsEnv = SymbolEnv.createBlockEnv(joinResultsBlock, this.env); this.analyzeNode(joinResultsBlock, joinResultsEnv); /* create an environment for the join body, making the enclosing environment the earlier * join result's environment */ SymbolEnv joinBodyEnv = SymbolEnv.createBlockEnv(forkJoin.joinedBody, joinResultsEnv); this.analyzeNode(forkJoin.joinedBody, joinBodyEnv); if (forkJoin.timeoutExpression != null) { if (!this.isJoinResultType(forkJoin.timeoutVariable)) { this.dlog.error(forkJoin.timeoutVariable.pos, DiagnosticCode.INVALID_WORKER_TIMEOUT_RESULT_TYPE); } /* create code block and environment for timeout section */ BLangBlockStmt timeoutVarBlock = this.generateCodeBlock(this.createVarDef(forkJoin.timeoutVariable)); SymbolEnv timeoutVarEnv = SymbolEnv.createBlockEnv(timeoutVarBlock, this.env); this.typeChecker.checkExpr(forkJoin.timeoutExpression, timeoutVarEnv, Arrays.asList(symTable.intType)); this.analyzeNode(timeoutVarBlock, timeoutVarEnv); /* create an environment for the timeout body, making the enclosing environment the earlier * timeout var's environment */ SymbolEnv timeoutBodyEnv = SymbolEnv.createBlockEnv(forkJoin.timeoutBody, timeoutVarEnv); this.analyzeNode(forkJoin.timeoutBody, timeoutBodyEnv); } this.validateJoinWorkerList(forkJoin, forkJoinEnv); } private void validateJoinWorkerList(BLangForkJoin forkJoin, SymbolEnv forkJoinEnv) { forkJoin.joinedWorkers.forEach(e -> { if (!this.workerExists(forkJoinEnv, e.value)) { this.dlog.error(forkJoin.pos, DiagnosticCode.UNDEFINED_WORKER, e.value); } }); } @Override public void visit(BLangWorker workerNode) { SymbolEnv workerEnv = SymbolEnv.createWorkerEnv(workerNode, this.env); this.analyzeNode(workerNode.body, workerEnv); } private boolean isInTopLevelWorkerEnv() { return this.env.enclEnv.node.getKind() == NodeKind.WORKER; } private boolean workerExists(SymbolEnv env, String workerName) { BSymbol symbol = this.symResolver.lookupSymbol(env, new Name(workerName), SymTag.WORKER); return (symbol != this.symTable.notFoundSymbol); } @Override public void visit(BLangWorkerSend workerSendNode) { workerSendNode.env = this.env; workerSendNode.exprs.forEach(e -> this.typeChecker.checkExpr(e, this.env)); if (!this.isInTopLevelWorkerEnv()) { this.dlog.error(workerSendNode.pos, DiagnosticCode.INVALID_WORKER_SEND_POSITION); } if (!workerSendNode.isForkJoinSend) { String workerName = workerSendNode.workerIdentifier.getValue(); if (!this.workerExists(this.env, workerName)) { this.dlog.error(workerSendNode.pos, DiagnosticCode.UNDEFINED_WORKER, workerName); } } } @Override public void visit(BLangWorkerReceive workerReceiveNode) { workerReceiveNode.exprs.forEach(e -> this.typeChecker.checkExpr(e, this.env)); if (!this.isInTopLevelWorkerEnv()) { this.dlog.error(workerReceiveNode.pos, 
DiagnosticCode.INVALID_WORKER_RECEIVE_POSITION); } String workerName = workerReceiveNode.workerIdentifier.getValue(); if (!this.workerExists(this.env, workerName)) { this.dlog.error(workerReceiveNode.pos, DiagnosticCode.UNDEFINED_WORKER, workerName); } } private boolean checkReturnValueCounts(BLangReturn returnNode) { boolean success = false; int expRetCount = this.env.enclInvokable.getReturnParameters().size(); int actualRetCount = returnNode.exprs.size(); if (expRetCount > 1 && actualRetCount <= 1) { this.dlog.error(returnNode.pos, DiagnosticCode.MULTI_VALUE_RETURN_EXPECTED); } else if (expRetCount == 1 && actualRetCount > 1) { this.dlog.error(returnNode.pos, DiagnosticCode.SINGLE_VALUE_RETURN_EXPECTED); } else if (expRetCount == 0 && actualRetCount >= 1) { this.dlog.error(returnNode.pos, DiagnosticCode.RETURN_VALUE_NOT_EXPECTED); } else if (expRetCount > actualRetCount) { this.dlog.error(returnNode.pos, DiagnosticCode.NOT_ENOUGH_RETURN_VALUES); } else if (expRetCount < actualRetCount) { this.dlog.error(returnNode.pos, DiagnosticCode.TOO_MANY_RETURN_VALUES); } else { success = true; } return success; } private boolean isInvocationExpr(BLangExpression expr) { return expr.getKind() == NodeKind.INVOCATION; } @Override public void visit(BLangReturn returnNode) { if (returnNode.exprs.size() == 1 && this.isInvocationExpr(returnNode.exprs.get(0))) { /* a single return expression can be expanded to match a multi-value return */ this.typeChecker.checkExpr(returnNode.exprs.get(0), this.env, this.env.enclInvokable.getReturnParameters().stream() .map(e -> e.getTypeNode().type) .collect(Collectors.toList())); } else { if (returnNode.exprs.size() == 0 && this.env.enclInvokable.getReturnParameters().size() > 0 && !this.env.enclInvokable.getReturnParameters().get(0).name.value.isEmpty()) { returnNode.namedReturnVariables = this.env.enclInvokable.getReturnParameters(); return; } if (this.checkReturnValueCounts(returnNode)) { for (int i = 0; i < returnNode.exprs.size(); i++) { this.typeChecker.checkExpr(returnNode.exprs.get(i), this.env, Arrays.asList(this.env.enclInvokable.getReturnParameters().get(i).getTypeNode().type)); } } } } BType analyzeDef(BLangNode node, SymbolEnv env) { return analyzeNode(node, env); } BType analyzeStmt(BLangStatement stmtNode, SymbolEnv env) { return analyzeNode(stmtNode, env); } BType analyzeNode(BLangNode node, SymbolEnv env) { return analyzeNode(node, env, symTable.noType, null); } public void visit(BLangNext nextNode) { /* ignore */ } public void visit(BLangBreak breakNode) { /* ignore */ } @Override public void visit(BLangThrow throwNode) { this.typeChecker.checkExpr(throwNode.expr, env); if (!types.checkStructEquivalency(throwNode.expr.type, symTable.errStructType)) { dlog.error(throwNode.expr.pos, DiagnosticCode.INCOMPATIBLE_TYPES, symTable.errStructType, throwNode.expr.type); } } @Override public void visit(BLangTransformer transformerNode) { SymbolEnv transformerEnv = SymbolEnv.createTransformerEnv(transformerNode, transformerNode.symbol.scope, env); transformerNode.annAttachments.forEach(annotationAttachment -> { annotationAttachment.attachmentPoint = new BLangAnnotationAttachmentPoint( BLangAnnotationAttachmentPoint.AttachmentPoint.TRANSFORMER, null); this.analyzeDef(annotationAttachment, transformerEnv); }); validateTransformerMappingType(transformerNode.source); validateTransformerMappingType(transformerNode.retParams.get(0)); analyzeStmt(transformerNode.body, transformerEnv); int returnCount = transformerNode.retParams.size(); if (returnCount == 0) { 
dlog.error(transformerNode.pos, DiagnosticCode.TRANSFORMER_MUST_HAVE_OUTPUT); } else if (returnCount > 1) { dlog.error(transformerNode.pos, DiagnosticCode.TOO_MANY_OUTPUTS_FOR_TRANSFORMER, 1, returnCount); } this.processWorkers(transformerNode, transformerEnv); transformerNode.docAttachments.forEach(doc -> analyzeDef(doc, transformerEnv)); } BType analyzeNode(BLangNode node, SymbolEnv env, BType expType, DiagnosticCode diagCode) { SymbolEnv prevEnv = this.env; BType preExpType = this.expType; DiagnosticCode preDiagCode = this.diagCode; this.env = env; this.expType = expType; this.diagCode = diagCode; node.accept(this); this.env = prevEnv; this.expType = preExpType; this.diagCode = preDiagCode; return resType; } private void handleForeachVariables(BLangForeach foreachStmt, List<BType> varTypes, SymbolEnv env) { for (int i = 0; i < foreachStmt.varRefs.size(); i++) { BLangExpression varRef = foreachStmt.varRefs.get(i); if (varRef.getKind() != NodeKind.SIMPLE_VARIABLE_REF) { dlog.error(varRef.pos, DiagnosticCode.INVALID_VARIABLE_ASSIGNMENT, varRef); continue; } BLangSimpleVarRef simpleVarRef = (BLangSimpleVarRef) varRef; simpleVarRef.lhsVar = true; Name varName = names.fromIdNode(simpleVarRef.variableName); if (varName == Names.IGNORE) { simpleVarRef.type = this.symTable.noType; typeChecker.checkExpr(simpleVarRef, env); continue; } BSymbol symbol = symResolver.lookupSymbol(env, varName, SymTag.VARIABLE); if (symbol == symTable.notFoundSymbol) { symbolEnter.defineVarSymbol(simpleVarRef.pos, Collections.emptySet(), varTypes.get(i), varName, env); typeChecker.checkExpr(simpleVarRef, env); } else { dlog.error(simpleVarRef.pos, DiagnosticCode.REDECLARED_SYMBOL, varName); } } } private void handleAssignNodeWithVar(BLangAssignment assignNode) { int ignoredCount = 0; int createdSymbolCount = 0; List<Name> newVariables = new ArrayList<Name>(); List<BType> expTypes = new ArrayList<>(); for (int i = 0; i < assignNode.varRefs.size(); i++) { BLangExpression varRef = assignNode.varRefs.get(i); if (varRef.getKind() != NodeKind.SIMPLE_VARIABLE_REF) { dlog.error(varRef.pos, DiagnosticCode.INVALID_VARIABLE_ASSIGNMENT, varRef); expTypes.add(symTable.errType); continue; } BLangSimpleVarRef simpleVarRef = (BLangSimpleVarRef) varRef; ((BLangVariableReference) varRef).lhsVar = true; Name varName = names.fromIdNode(simpleVarRef.variableName); if (varName == Names.IGNORE) { ignoredCount++; simpleVarRef.type = this.symTable.noType; expTypes.add(symTable.noType); typeChecker.checkExpr(simpleVarRef, env); continue; } BSymbol symbol = symResolver.lookupSymbol(env, varName, SymTag.VARIABLE); if (symbol == symTable.notFoundSymbol) { createdSymbolCount++; newVariables.add(varName); expTypes.add(symTable.noType); } else { expTypes.add(symbol.type); } } if (ignoredCount == assignNode.varRefs.size() || createdSymbolCount == 0) { dlog.error(assignNode.pos, DiagnosticCode.NO_NEW_VARIABLES_VAR_ASSIGNMENT); } final List<BType> rhsTypes = typeChecker.checkExpr(assignNode.expr, this.env, expTypes); for (int i = 0; i < assignNode.varRefs.size(); i++) { BLangExpression varRef = assignNode.varRefs.get(i); if (varRef.getKind() != NodeKind.SIMPLE_VARIABLE_REF) { continue; } BType actualType = rhsTypes.get(i); BLangSimpleVarRef simpleVarRef = (BLangSimpleVarRef) varRef; Name varName = names.fromIdNode(simpleVarRef.variableName); if (newVariables.contains(varName)) { this.symbolEnter.defineVarSymbol(simpleVarRef.pos, Collections.emptySet(), actualType, varName, env); } typeChecker.checkExpr(simpleVarRef, env); } } private void 
checkRetryStmtValidity(BLangExpression retryCountExpr) { boolean error = true; NodeKind retryKind = retryCountExpr.getKind(); if (retryKind == NodeKind.LITERAL) { if (retryCountExpr.type.tag == TypeTags.INT) { int retryCount = Integer.parseInt(((BLangLiteral) retryCountExpr).getValue().toString()); if (retryCount >= 0) { error = false; } } } else if (retryKind == NodeKind.SIMPLE_VARIABLE_REF) { if (((BLangSimpleVarRef) retryCountExpr).symbol.flags == Flags.CONST) { if (((BLangSimpleVarRef) retryCountExpr).symbol.type.tag == TypeTags.INT) { error = false; } } } if (error) { this.dlog.error(retryCountExpr.pos, DiagnosticCode.INVALID_RETRY_COUNT); } } private void validateTransformerMappingType(BLangVariable param) { BType type = param.type; if (types.isValueType(type) || (type instanceof BBuiltInRefType) || type.tag == TypeTags.STRUCT) { return; } dlog.error(param.pos, DiagnosticCode.TRANSFORMER_UNSUPPORTED_TYPES, type); } }
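Worth noting in the two SemanticAnalyzer contexts: the earlier visit(BLangDocumentation) above collects seen attributes in a List and re-scans it with a stream for every attribute (quadratic), while the later version below uses the boolean result of Set.add() to detect duplicates in a single pass. A minimal sketch of that idiom, with hypothetical attribute names:

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class DuplicateAttributeSketch {
    public static void main(String[] args) {
        List<String> attributes = Arrays.asList("x", "y", "x");
        // Set.add returns false when the element was already present,
        // so the membership check and the insertion happen in one O(1) step.
        Set<String> visited = new HashSet<>();
        for (String attribute : attributes) {
            if (!visited.add(attribute)) {
                System.out.println("duplicate documented attribute: " + attribute);
            }
        }
    }
}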
class SemanticAnalyzer extends BLangNodeVisitor { private static final CompilerContext.Key<SemanticAnalyzer> SYMBOL_ANALYZER_KEY = new CompilerContext.Key<>(); private SymbolTable symTable; private SymbolEnter symbolEnter; private Names names; private SymbolResolver symResolver; private TypeChecker typeChecker; private Types types; private DiagnosticLog dlog; private SymbolEnv env; private BType expType; private DiagnosticCode diagCode; private BType resType; public static SemanticAnalyzer getInstance(CompilerContext context) { SemanticAnalyzer semAnalyzer = context.get(SYMBOL_ANALYZER_KEY); if (semAnalyzer == null) { semAnalyzer = new SemanticAnalyzer(context); } return semAnalyzer; } public SemanticAnalyzer(CompilerContext context) { context.put(SYMBOL_ANALYZER_KEY, this); this.symTable = SymbolTable.getInstance(context); this.symbolEnter = SymbolEnter.getInstance(context); this.names = Names.getInstance(context); this.symResolver = SymbolResolver.getInstance(context); this.typeChecker = TypeChecker.getInstance(context); this.types = Types.getInstance(context); this.dlog = DiagnosticLog.getInstance(context); } public BLangPackage analyze(BLangPackage pkgNode) { pkgNode.accept(this); return pkgNode; } public void visit(BLangPackage pkgNode) { if (pkgNode.completedPhases.contains(CompilerPhase.TYPE_CHECK)) { return; } SymbolEnv pkgEnv = symbolEnter.packageEnvs.get(pkgNode.symbol); pkgNode.imports.forEach(importNode -> analyzeDef(importNode, pkgEnv)); pkgNode.topLevelNodes.forEach(topLevelNode -> analyzeDef((BLangNode) topLevelNode, pkgEnv)); analyzeDef(pkgNode.initFunction, pkgEnv); pkgNode.completedPhases.add(CompilerPhase.TYPE_CHECK); } public void visit(BLangImportPackage importPkgNode) { BPackageSymbol pkgSymbol = importPkgNode.symbol; SymbolEnv pkgEnv = symbolEnter.packageEnvs.get(pkgSymbol); if (pkgEnv == null) { return; } analyzeDef(pkgEnv.node, pkgEnv); } public void visit(BLangXMLNS xmlnsNode) { xmlnsNode.type = symTable.stringType; if (xmlnsNode.symbol != null) { return; } symbolEnter.defineNode(xmlnsNode, env); typeChecker.checkExpr(xmlnsNode.namespaceURI, env, Lists.of(symTable.stringType)); } public void visit(BLangXMLNSStatement xmlnsStmtNode) { analyzeNode(xmlnsStmtNode.xmlnsDecl, env); } private void processWorkers(BLangInvokableNode invNode, SymbolEnv invEnv) { if (invNode.workers.size() > 0) { invEnv.scope.entries.putAll(invNode.body.scope.entries); invNode.workers.forEach(e -> this.symbolEnter.defineNode(e, invEnv)); invNode.workers.forEach(e -> analyzeNode(e, invEnv)); } } public void visit(BLangStruct structNode) { BSymbol structSymbol = structNode.symbol; SymbolEnv structEnv = SymbolEnv.createPkgLevelSymbolEnv(structNode, structSymbol.scope, env); structNode.fields.forEach(field -> analyzeDef(field, structEnv)); structNode.annAttachments.forEach(annotationAttachment -> { annotationAttachment.attachmentPoint = new BLangAnnotationAttachmentPoint(BLangAnnotationAttachmentPoint.AttachmentPoint.STRUCT, null); annotationAttachment.accept(this); }); structNode.docAttachments.forEach(doc -> analyzeDef(doc, structEnv)); } @Override public void visit(BLangEnum enumNode) { BSymbol enumSymbol = enumNode.symbol; SymbolEnv enumEnv = SymbolEnv.createPkgLevelSymbolEnv(enumNode, enumSymbol.scope, env); enumNode.annAttachments.forEach(annotationAttachment -> { annotationAttachment.attachmentPoint = new BLangAnnotationAttachmentPoint( BLangAnnotationAttachmentPoint.AttachmentPoint.ENUM, null); annotationAttachment.accept(this); }); enumNode.docAttachments.forEach(doc -> analyzeDef(doc, 
enumEnv)); } @Override public void visit(BLangDocumentation docNode) { Set<BLangIdentifier> visitedAttributes = new HashSet<>(); for (BLangDocumentationAttribute attribute : docNode.attributes) { if (!visitedAttributes.add(attribute.documentationField)) { this.dlog.warning(attribute.pos, DiagnosticCode.DUPLICATE_DOCUMENTED_ATTRIBUTE, attribute.documentationField); continue; } Name attributeName = names.fromIdNode(attribute.documentationField); BSymbol attributeSymbol = this.env.scope.lookup(attributeName).symbol; if (attributeSymbol == null) { this.dlog.warning(attribute.pos, DiagnosticCode.NO_SUCH_DOCUMENTABLE_ATTRIBUTE, attribute.documentationField, attribute.docTag.getValue()); continue; } int ownerSymTag = env.scope.owner.tag; if ((ownerSymTag & SymTag.ANNOTATION) == SymTag.ANNOTATION) { if (attributeSymbol.tag != SymTag.ANNOTATION_ATTRIBUTE || ((BAnnotationAttributeSymbol) attributeSymbol).docTag != attribute.docTag) { this.dlog.warning(attribute.pos, DiagnosticCode.NO_SUCH_DOCUMENTABLE_ATTRIBUTE, attribute.documentationField, attribute.docTag.getValue()); continue; } } else { if (attributeSymbol.tag != SymTag.VARIABLE || ((BVarSymbol) attributeSymbol).docTag != attribute.docTag) { this.dlog.warning(attribute.pos, DiagnosticCode.NO_SUCH_DOCUMENTABLE_ATTRIBUTE, attribute.documentationField, attribute.docTag.getValue()); continue; } } attribute.type = attributeSymbol.type; } } public void visit(BLangAnnotation annotationNode) { SymbolEnv annotationEnv = SymbolEnv.createAnnotationEnv(annotationNode, annotationNode.symbol.scope, env); annotationNode.attributes.forEach(attribute -> { analyzeNode(attribute, annotationEnv); }); annotationNode.attachmentPoints.forEach(point -> { if (point.pkgAlias != null) { BSymbol pkgSymbol = symResolver.resolvePkgSymbol(annotationNode.pos, annotationEnv, names.fromIdNode(point.pkgAlias)); if (pkgSymbol == symTable.notFoundSymbol) { return; } point.pkgPath = pkgSymbol.pkgID.name.getValue(); } }); annotationNode.annAttachments.forEach(annotationAttachment -> { annotationAttachment.attachmentPoint = new BLangAnnotationAttachmentPoint(BLangAnnotationAttachmentPoint.AttachmentPoint.ANNOTATION, null); annotationAttachment.accept(this); }); annotationNode.docAttachments.forEach(doc -> analyzeDef(doc, annotationEnv)); } public void visit(BLangAnnotAttribute annotationAttribute) { if (annotationAttribute.expr != null) { BType actualType = this.typeChecker.checkExpr(annotationAttribute.expr, env, Lists.of(annotationAttribute.symbol.type), DiagnosticCode.INVALID_OPERATION_INCOMPATIBLE_TYPES).get(0); if (!(this.types.isValueType(annotationAttribute.symbol.type) && this.types.isValueType(actualType))) { this.dlog.error(annotationAttribute.pos, DiagnosticCode.INVALID_DEFAULT_VALUE); } } else { if (!this.types.isAnnotationFieldType(annotationAttribute.symbol.type)) { this.dlog.error(annotationAttribute.pos, DiagnosticCode.INVALID_ATTRIBUTE_TYPE, annotationAttribute.symbol.type); } } } public void visit(BLangAnnotationAttachment annAttachmentNode) { BSymbol symbol = this.symResolver.resolveAnnotation(annAttachmentNode.pos, env, names.fromString(annAttachmentNode.pkgAlias.getValue()), names.fromString(annAttachmentNode.getAnnotationName().getValue())); if (symbol == this.symTable.notFoundSymbol) { this.dlog.error(annAttachmentNode.pos, DiagnosticCode.UNDEFINED_ANNOTATION, annAttachmentNode.getAnnotationName().getValue()); return; } BAnnotationSymbol annotationSymbol = (BAnnotationSymbol) symbol; annAttachmentNode.annotationSymbol = annotationSymbol; if 
(annotationSymbol.getAttachmentPoints() != null && annotationSymbol.getAttachmentPoints().size() > 0) { BLangAnnotationAttachmentPoint[] attachmentPointsArrray = new BLangAnnotationAttachmentPoint[annotationSymbol.getAttachmentPoints().size()]; Optional<BLangAnnotationAttachmentPoint> matchingAttachmentPoint = Arrays .stream(annotationSymbol.getAttachmentPoints().toArray(attachmentPointsArrray)) .filter(attachmentPoint -> attachmentPoint.equals(annAttachmentNode.attachmentPoint)) .findAny(); if (!matchingAttachmentPoint.isPresent()) { String msg = annAttachmentNode.attachmentPoint.getAttachmentPoint().getValue(); if (annAttachmentNode.attachmentPoint.getPkgPath() != null) { msg = annAttachmentNode.attachmentPoint.getAttachmentPoint().getValue() + "<" + annAttachmentNode.attachmentPoint.getPkgPath() + ">"; } this.dlog.error(annAttachmentNode.pos, DiagnosticCode.ANNOTATION_NOT_ALLOWED, annotationSymbol, msg); } } validateAttributes(annAttachmentNode, annotationSymbol); populateDefaultValues(annAttachmentNode, annotationSymbol); } private void validateAttributes(BLangAnnotationAttachment annAttachmentNode, BAnnotationSymbol annotationSymbol) { annAttachmentNode.attributes.forEach(annotAttachmentAttribute -> { Name attributeName = names.fromIdNode((BLangIdentifier) annotAttachmentAttribute.getName()); BAnnotationAttributeSymbol attributeSymbol = (BAnnotationAttributeSymbol) annotationSymbol.scope.lookup(attributeName).symbol; if (attributeSymbol == null) { this.dlog.error(annAttachmentNode.pos, DiagnosticCode.NO_SUCH_ATTRIBUTE, annotAttachmentAttribute.getName(), annotationSymbol.name); return; } if (annotAttachmentAttribute.value.value != null && annotAttachmentAttribute.value.value instanceof BLangExpression) { BType resolvedType = this.typeChecker.checkExpr((BLangExpression) annotAttachmentAttribute.value.value, env, Lists.of(attributeSymbol.type), DiagnosticCode.INCOMPATIBLE_TYPES).get(0); if (resolvedType == symTable.errType) { return; } if (annotAttachmentAttribute.value.value instanceof BLangSimpleVarRef && ((BLangSimpleVarRef) annotAttachmentAttribute.value.value).symbol.flags != Flags.CONST) { this.dlog.error(annAttachmentNode.pos, DiagnosticCode.ATTRIBUTE_VAL_CANNOT_REFER_NON_CONST); } return; } else { if (attributeSymbol.type.tag == TypeTags.ARRAY) { if (annotAttachmentAttribute.value.value != null) { if (annotAttachmentAttribute.value.value instanceof BLangExpression) { this.typeChecker.checkExpr((BLangExpression) annotAttachmentAttribute.value.value, env, Lists.of(attributeSymbol.type), DiagnosticCode.INCOMPATIBLE_TYPES); } else { BLangAnnotationAttachment childAttachment = (BLangAnnotationAttachment) annotAttachmentAttribute.value.value; BSymbol symbol = this.symResolver.resolveAnnotation(childAttachment.pos, env, names.fromString(childAttachment.pkgAlias.getValue()), names.fromString(childAttachment.getAnnotationName().getValue())); if (symbol == this.symTable.notFoundSymbol) { this.dlog.error(childAttachment.pos, DiagnosticCode.UNDEFINED_ANNOTATION, childAttachment.getAnnotationName().getValue()); return; } childAttachment.type = symbol.type; this.types.checkType(childAttachment.pos, childAttachment.type, attributeSymbol.type, DiagnosticCode.INCOMPATIBLE_TYPES); } } annotAttachmentAttribute.value.arrayValues.forEach(value -> { if (value.value instanceof BLangAnnotationAttachment) { BLangAnnotationAttachment childAttachment = (BLangAnnotationAttachment) value.value; if (childAttachment != null) { BSymbol symbol = this.symResolver.resolveAnnotation(childAttachment.pos, env, 
names.fromString(childAttachment.pkgAlias.getValue()), names.fromString(childAttachment.getAnnotationName().getValue())); if (symbol == this.symTable.notFoundSymbol) { this.dlog.error(annAttachmentNode.pos, DiagnosticCode.UNDEFINED_ANNOTATION, childAttachment.getAnnotationName().getValue()); return; } childAttachment.type = symbol.type; childAttachment.annotationSymbol = (BAnnotationSymbol) symbol; this.types.checkType(childAttachment.pos, childAttachment.type, ((BArrayType) attributeSymbol.type).eType, DiagnosticCode.INCOMPATIBLE_TYPES); validateAttributes(childAttachment, (BAnnotationSymbol) symbol); } } else { this.typeChecker.checkExpr((BLangExpression) value.value, env, Lists.of(((BArrayType) attributeSymbol.type).eType), DiagnosticCode.INCOMPATIBLE_TYPES); } }); } else { if (annotAttachmentAttribute.value.value == null) { this.dlog.error(annAttachmentNode.pos, DiagnosticCode.INCOMPATIBLE_TYPES_ARRAY_FOUND, attributeSymbol.type); } BLangAnnotationAttachment childAttachment = (BLangAnnotationAttachment) annotAttachmentAttribute.value.value; if (childAttachment != null) { BSymbol symbol = this.symResolver.resolveAnnotation(childAttachment.pos, env, names.fromString(childAttachment.pkgAlias.getValue()), names.fromString(childAttachment.getAnnotationName().getValue())); if (symbol == this.symTable.notFoundSymbol) { this.dlog.error(annAttachmentNode.pos, DiagnosticCode.UNDEFINED_ANNOTATION, childAttachment.getAnnotationName().getValue()); return; } childAttachment.type = symbol.type; childAttachment.annotationSymbol = (BAnnotationSymbol) symbol; this.types.checkType(childAttachment.pos, childAttachment.type, attributeSymbol.type, DiagnosticCode.INCOMPATIBLE_TYPES); validateAttributes(childAttachment, (BAnnotationSymbol) symbol); } } } }); } private void populateDefaultValues(BLangAnnotationAttachment annAttachmentNode, BAnnotationSymbol annotationSymbol) { for (BAnnotationAttributeSymbol defAttribute : annotationSymbol.attributes) { BLangAnnotAttachmentAttribute[] attributeArrray = new BLangAnnotAttachmentAttribute[annAttachmentNode.getAttributes().size()]; Optional<BLangAnnotAttachmentAttribute> matchingAttribute = Arrays .stream(annAttachmentNode.getAttributes().toArray(attributeArrray)) .filter(attribute -> attribute.name.value.equals(defAttribute.name.getValue())) .findAny(); if (!matchingAttribute.isPresent()) { if (defAttribute.expr != null) { BLangAnnotAttachmentAttributeValue value = new BLangAnnotAttachmentAttributeValue(); value.value = defAttribute.expr; BLangIdentifier name = (BLangIdentifier) TreeBuilder.createIdentifierNode(); name.value = defAttribute.name.getValue(); BLangAnnotAttachmentAttribute attribute = new BLangAnnotAttachmentAttribute(name, value); annAttachmentNode.addAttribute(attribute); } continue; } if (matchingAttribute.get().value.value != null && !(matchingAttribute.get().value.value instanceof BLangAnnotationAttachment)) { continue; } if (matchingAttribute.get().value.arrayValues.size() > 0) { for (BLangAnnotAttachmentAttributeValue attr : matchingAttribute.get().value.arrayValues) { if (attr.value != null && !(attr.value instanceof BLangAnnotationAttachment)) { continue; } BLangAnnotationAttachment attachment = (BLangAnnotationAttachment) attr.value; if (attachment != null) { BSymbol symbol = this.symResolver.resolveAnnotation(attachment.pos, env, names.fromString(attachment.pkgAlias.getValue()), names.fromString(attachment.getAnnotationName().getValue())); attachment.annotationSymbol = (BAnnotationSymbol) symbol; if (symbol == this.symTable.notFoundSymbol) 
{ this.dlog.error(annAttachmentNode.pos, DiagnosticCode.UNDEFINED_ANNOTATION, attachment.getAnnotationName().getValue()); return; } populateDefaultValues(attachment, (BAnnotationSymbol) symbol); } } } else { BLangAnnotationAttachment attachment = (BLangAnnotationAttachment) matchingAttribute.get().value.value; if (attachment != null) { BSymbol symbol = this.symResolver.resolveAnnotation(attachment.pos, env, names.fromString(attachment.pkgAlias.getValue()), names.fromString(attachment.getAnnotationName().getValue())); attachment.annotationSymbol = (BAnnotationSymbol) symbol; if (symbol == this.symTable.notFoundSymbol) { this.dlog.error(annAttachmentNode.pos, DiagnosticCode.UNDEFINED_ANNOTATION, attachment.getAnnotationName().getValue()); return; } populateDefaultValues(attachment, (BAnnotationSymbol) symbol); } } } } public void visit(BLangVariable varNode) { int ownerSymTag = env.scope.owner.tag; if ((ownerSymTag & SymTag.INVOKABLE) == SymTag.INVOKABLE) { if (varNode.symbol == null) { symbolEnter.defineNode(varNode, env); } } varNode.annAttachments.forEach(a -> { a.attachmentPoint = new BLangAnnotationAttachmentPoint(BLangAnnotationAttachmentPoint.AttachmentPoint.CONST, null); a.accept(this); }); varNode.docAttachments.forEach(doc -> { doc.accept(this); }); if (varNode.expr != null) { SymbolEnv varInitEnv = SymbolEnv.createVarInitEnv(varNode, env, varNode.symbol); if ((ownerSymTag & SymTag.PACKAGE) != SymTag.PACKAGE && (ownerSymTag & SymTag.SERVICE) != SymTag.SERVICE && (ownerSymTag & SymTag.CONNECTOR) != SymTag.CONNECTOR) { typeChecker.checkExpr(varNode.expr, varInitEnv, Lists.of(varNode.symbol.type)); } } varNode.type = varNode.symbol.type; } public void visit(BLangBlockStmt blockNode) { SymbolEnv blockEnv = SymbolEnv.createBlockEnv(blockNode, env); blockNode.stmts.forEach(stmt -> analyzeStmt(stmt, blockEnv)); } public void visit(BLangVariableDef varDefNode) { analyzeDef(varDefNode.var, env); } public void visit(BLangAssignment assignNode) { if (assignNode.isDeclaredWithVar()) { handleAssignNodeWithVar(assignNode); return; } List<BType> expTypes = new ArrayList<>(); for (BLangExpression expr : assignNode.varRefs) { if (expr.getKind() != NodeKind.SIMPLE_VARIABLE_REF && expr.getKind() != NodeKind.INDEX_BASED_ACCESS_EXPR && expr.getKind() != NodeKind.FIELD_BASED_ACCESS_EXPR && expr.getKind() != NodeKind.XML_ATTRIBUTE_ACCESS_EXPR) { dlog.error(expr.pos, DiagnosticCode.INVALID_VARIABLE_ASSIGNMENT, expr); expTypes.add(symTable.errType); continue; } BLangVariableReference varRef = (BLangVariableReference) expr; varRef.lhsVar = true; typeChecker.checkExpr(varRef, env).get(0); if (varRef.getKind() == NodeKind.FIELD_BASED_ACCESS_EXPR && ((BLangFieldBasedAccess) varRef).expr.type.tag == TypeTags.ENUM) { dlog.error(varRef.pos, DiagnosticCode.INVALID_VARIABLE_ASSIGNMENT, varRef); expTypes.add(symTable.errType); continue; } expTypes.add(varRef.type); checkConstantAssignment(varRef); } typeChecker.checkExpr(assignNode.expr, this.env, expTypes); } public void visit(BLangBind bindNode) { List<BType> expTypes = new ArrayList<>(); BLangExpression varRef = bindNode.varRef; ((BLangVariableReference) varRef).lhsVar = true; expTypes.add(typeChecker.checkExpr(varRef, env).get(0)); checkConstantAssignment(varRef); typeChecker.checkExpr(bindNode.expr, this.env, expTypes); } private void checkConstantAssignment(BLangExpression varRef) { if (varRef.type == symTable.errType) { return; } if (varRef.getKind() != NodeKind.SIMPLE_VARIABLE_REF) { return; } BLangSimpleVarRef simpleVarRef = (BLangSimpleVarRef) varRef; if 
(simpleVarRef.pkgSymbol != null && simpleVarRef.pkgSymbol.tag == SymTag.XMLNS) { dlog.error(varRef.pos, DiagnosticCode.XML_QNAME_UPDATE_NOT_ALLOWED); return; } Name varName = names.fromIdNode(simpleVarRef.variableName); if (!Names.IGNORE.equals(varName) && simpleVarRef.symbol.flags == Flags.CONST && env.enclInvokable != env.enclPkg.initFunction) { dlog.error(varRef.pos, DiagnosticCode.CANNOT_ASSIGN_VALUE_CONSTANT, varRef); } } public void visit(BLangExpressionStmt exprStmtNode) { SymbolEnv stmtEnv = new SymbolEnv(exprStmtNode, this.env.scope); this.env.copyTo(stmtEnv); List<BType> bTypes = typeChecker.checkExpr(exprStmtNode.expr, stmtEnv, new ArrayList<>()); if (bTypes.size() > 0 && !(bTypes.size() == 1 && bTypes.get(0) == symTable.errType)) { dlog.error(exprStmtNode.pos, DiagnosticCode.ASSIGNMENT_REQUIRED); } } public void visit(BLangIf ifNode) { typeChecker.checkExpr(ifNode.expr, env, Lists.of(symTable.booleanType)); analyzeStmt(ifNode.body, env); if (ifNode.elseStmt != null) { analyzeStmt(ifNode.elseStmt, env); } } public void visit(BLangForeach foreach) { typeChecker.checkExpr(foreach.collection, env); foreach.varTypes = types.checkForeachTypes(foreach.collection, foreach.varRefs.size()); SymbolEnv blockEnv = SymbolEnv.createBlockEnv(foreach.body, env); handleForeachVariables(foreach, foreach.varTypes, blockEnv); analyzeStmt(foreach.body, blockEnv); } public void visit(BLangWhile whileNode) { typeChecker.checkExpr(whileNode.expr, env, Lists.of(symTable.booleanType)); analyzeStmt(whileNode.body, env); } @Override public void visit(BLangLock lockNode) { analyzeStmt(lockNode.body, env); } public void visit(BLangConnector connectorNode) { BSymbol connectorSymbol = connectorNode.symbol; SymbolEnv connectorEnv = SymbolEnv.createConnectorEnv(connectorNode, connectorSymbol.scope, env); connectorNode.annAttachments.forEach(a -> { a.attachmentPoint = new BLangAnnotationAttachmentPoint(BLangAnnotationAttachmentPoint.AttachmentPoint.CONNECTOR, null); this.analyzeDef(a, connectorEnv); }); connectorNode.docAttachments.forEach(doc -> analyzeDef(doc, connectorEnv)); connectorNode.params.forEach(param -> this.analyzeDef(param, connectorEnv)); connectorNode.varDefs.forEach(varDef -> this.analyzeDef(varDef, connectorEnv)); this.analyzeDef(connectorNode.initFunction, connectorEnv); connectorNode.actions.forEach(action -> this.analyzeDef(action, connectorEnv)); this.analyzeDef(connectorNode.initAction, connectorEnv); } public void visit(BLangAction actionNode) { BSymbol actionSymbol = actionNode.symbol; SymbolEnv actionEnv = SymbolEnv.createResourceActionSymbolEnv(actionNode, actionSymbol.scope, env); actionNode.annAttachments.forEach(a -> { a.attachmentPoint = new BLangAnnotationAttachmentPoint(BLangAnnotationAttachmentPoint.AttachmentPoint.ACTION, null); this.analyzeDef(a, actionEnv); }); actionNode.docAttachments.forEach(doc -> analyzeDef(doc, actionEnv)); if (Symbols.isNative(actionSymbol)) { return; } actionNode.params.forEach(p -> this.analyzeDef(p, actionEnv)); analyzeStmt(actionNode.body, actionEnv); this.processWorkers(actionNode, actionEnv); } public void visit(BLangService serviceNode) { BSymbol serviceSymbol = serviceNode.symbol; SymbolEnv serviceEnv = SymbolEnv.createPkgLevelSymbolEnv(serviceNode, serviceSymbol.scope, env); BSymbol protocolPkg = symResolver.resolvePkgSymbol(serviceNode.pos, serviceEnv, names.fromIdNode(serviceNode.protocolPkgIdentifier)); ((BTypeSymbol) serviceSymbol).protocolPkgId = protocolPkg.pkgID; serviceNode.annAttachments.forEach(a -> { a.attachmentPoint = new 
BLangAnnotationAttachmentPoint(BLangAnnotationAttachmentPoint.AttachmentPoint.SERVICE, protocolPkg.pkgID.name.getValue()); this.analyzeDef(a, serviceEnv); }); serviceNode.docAttachments.forEach(doc -> analyzeDef(doc, serviceEnv)); serviceNode.vars.forEach(v -> this.analyzeDef(v, serviceEnv)); this.analyzeDef(serviceNode.initFunction, serviceEnv); serviceNode.resources.forEach(r -> this.analyzeDef(r, serviceEnv)); } public void visit(BLangResource resourceNode) { BSymbol resourceSymbol = resourceNode.symbol; SymbolEnv resourceEnv = SymbolEnv.createResourceActionSymbolEnv(resourceNode, resourceSymbol.scope, env); resourceNode.annAttachments.forEach(a -> { a.attachmentPoint = new BLangAnnotationAttachmentPoint(BLangAnnotationAttachmentPoint.AttachmentPoint.RESOURCE, null); this.analyzeDef(a, resourceEnv); }); resourceNode.docAttachments.forEach(doc -> analyzeDef(doc, resourceEnv)); resourceNode.params.forEach(p -> this.analyzeDef(p, resourceEnv)); analyzeStmt(resourceNode.body, resourceEnv); this.processWorkers(resourceNode, resourceEnv); } public void visit(BLangTryCatchFinally tryCatchFinally) { analyzeStmt(tryCatchFinally.tryBody, env); tryCatchFinally.catchBlocks.forEach(c -> analyzeNode(c, env)); if (tryCatchFinally.finallyBody != null) { analyzeStmt(tryCatchFinally.finallyBody, env); } } public void visit(BLangCatch bLangCatch) { SymbolEnv catchBlockEnv = SymbolEnv.createBlockEnv(bLangCatch.body, env); analyzeNode(bLangCatch.param, catchBlockEnv); if (!this.types.checkStructEquivalency(bLangCatch.param.type, symTable.errStructType)) { dlog.error(bLangCatch.param.pos, DiagnosticCode.INCOMPATIBLE_TYPES, symTable.errStructType, bLangCatch.param.type); } analyzeStmt(bLangCatch.body, catchBlockEnv); } @Override public void visit(BLangTransaction transactionNode) { analyzeStmt(transactionNode.transactionBody, env); if (transactionNode.failedBody != null) { analyzeStmt(transactionNode.failedBody, env); } if (transactionNode.retryCount != null) { typeChecker.checkExpr(transactionNode.retryCount, env, Lists.of(symTable.intType)); checkRetryStmtValidity(transactionNode.retryCount); } } @Override public void visit(BLangAbort abortNode) { } private boolean isJoinResultType(BLangVariable var) { BLangType type = var.typeNode; if (type instanceof BuiltInReferenceTypeNode) { return ((BuiltInReferenceTypeNode) type).getTypeKind() == TypeKind.MAP; } return false; } private BLangVariableDef createVarDef(BLangVariable var) { BLangVariableDef varDefNode = new BLangVariableDef(); varDefNode.var = var; varDefNode.pos = var.pos; return varDefNode; } private BLangBlockStmt generateCodeBlock(StatementNode... statements) { BLangBlockStmt block = new BLangBlockStmt(); for (StatementNode stmt : statements) { block.addStatement(stmt); } return block; } @Override public void visit(BLangForkJoin forkJoin) { SymbolEnv forkJoinEnv = SymbolEnv.createFolkJoinEnv(forkJoin, this.env); forkJoin.workers.forEach(e -> this.symbolEnter.defineNode(e, forkJoinEnv)); forkJoin.workers.forEach(e -> this.analyzeDef(e, forkJoinEnv)); if (!this.isJoinResultType(forkJoin.joinResultVar)) { this.dlog.error(forkJoin.joinResultVar.pos, DiagnosticCode.INVALID_WORKER_JOIN_RESULT_TYPE); } /* create code black and environment for join result section, i.e. 
(map results) */ BLangBlockStmt joinResultsBlock = this.generateCodeBlock(this.createVarDef(forkJoin.joinResultVar)); SymbolEnv joinResultsEnv = SymbolEnv.createBlockEnv(joinResultsBlock, this.env); this.analyzeNode(joinResultsBlock, joinResultsEnv); /* create an environment for the join body, making the enclosing environment the earlier * join result's environment */ SymbolEnv joinBodyEnv = SymbolEnv.createBlockEnv(forkJoin.joinedBody, joinResultsEnv); this.analyzeNode(forkJoin.joinedBody, joinBodyEnv); if (forkJoin.timeoutExpression != null) { if (!this.isJoinResultType(forkJoin.timeoutVariable)) { this.dlog.error(forkJoin.timeoutVariable.pos, DiagnosticCode.INVALID_WORKER_TIMEOUT_RESULT_TYPE); } /* create code black and environment for timeout section */ BLangBlockStmt timeoutVarBlock = this.generateCodeBlock(this.createVarDef(forkJoin.timeoutVariable)); SymbolEnv timeoutVarEnv = SymbolEnv.createBlockEnv(timeoutVarBlock, this.env); this.typeChecker.checkExpr(forkJoin.timeoutExpression, timeoutVarEnv, Arrays.asList(symTable.intType)); this.analyzeNode(timeoutVarBlock, timeoutVarEnv); /* create an environment for the timeout body, making the enclosing environment the earlier * timeout var's environment */ SymbolEnv timeoutBodyEnv = SymbolEnv.createBlockEnv(forkJoin.timeoutBody, timeoutVarEnv); this.analyzeNode(forkJoin.timeoutBody, timeoutBodyEnv); } this.validateJoinWorkerList(forkJoin, forkJoinEnv); } private void validateJoinWorkerList(BLangForkJoin forkJoin, SymbolEnv forkJoinEnv) { forkJoin.joinedWorkers.forEach(e -> { if (!this.workerExists(forkJoinEnv, e.value)) { this.dlog.error(forkJoin.pos, DiagnosticCode.UNDEFINED_WORKER, e.value); } }); } @Override public void visit(BLangWorker workerNode) { SymbolEnv workerEnv = SymbolEnv.createWorkerEnv(workerNode, this.env); this.analyzeNode(workerNode.body, workerEnv); } private boolean isInTopLevelWorkerEnv() { return this.env.enclEnv.node.getKind() == NodeKind.WORKER; } private boolean workerExists(SymbolEnv env, String workerName) { BSymbol symbol = this.symResolver.lookupSymbol(env, new Name(workerName), SymTag.WORKER); return (symbol != this.symTable.notFoundSymbol); } @Override public void visit(BLangWorkerSend workerSendNode) { workerSendNode.env = this.env; workerSendNode.exprs.forEach(e -> this.typeChecker.checkExpr(e, this.env)); if (!this.isInTopLevelWorkerEnv()) { this.dlog.error(workerSendNode.pos, DiagnosticCode.INVALID_WORKER_SEND_POSITION); } if (!workerSendNode.isForkJoinSend) { String workerName = workerSendNode.workerIdentifier.getValue(); if (!this.workerExists(this.env, workerName)) { this.dlog.error(workerSendNode.pos, DiagnosticCode.UNDEFINED_WORKER, workerName); } } } @Override public void visit(BLangWorkerReceive workerReceiveNode) { workerReceiveNode.exprs.forEach(e -> this.typeChecker.checkExpr(e, this.env)); if (!this.isInTopLevelWorkerEnv()) { this.dlog.error(workerReceiveNode.pos, DiagnosticCode.INVALID_WORKER_RECEIVE_POSITION); } String workerName = workerReceiveNode.workerIdentifier.getValue(); if (!this.workerExists(this.env, workerName)) { this.dlog.error(workerReceiveNode.pos, DiagnosticCode.UNDEFINED_WORKER, workerName); } } private boolean checkReturnValueCounts(BLangReturn returnNode) { boolean success = false; int expRetCount = this.env.enclInvokable.getReturnParameters().size(); int actualRetCount = returnNode.exprs.size(); if (expRetCount > 1 && actualRetCount <= 1) { this.dlog.error(returnNode.pos, DiagnosticCode.MULTI_VALUE_RETURN_EXPECTED); } else if (expRetCount == 1 && actualRetCount > 1) { 
this.dlog.error(returnNode.pos, DiagnosticCode.SINGLE_VALUE_RETURN_EXPECTED); } else if (expRetCount == 0 && actualRetCount >= 1) { this.dlog.error(returnNode.pos, DiagnosticCode.RETURN_VALUE_NOT_EXPECTED); } else if (expRetCount > actualRetCount) { this.dlog.error(returnNode.pos, DiagnosticCode.NOT_ENOUGH_RETURN_VALUES); } else if (expRetCount < actualRetCount) { this.dlog.error(returnNode.pos, DiagnosticCode.TOO_MANY_RETURN_VALUES); } else { success = true; } return success; } private boolean isInvocationExpr(BLangExpression expr) { return expr.getKind() == NodeKind.INVOCATION; } @Override public void visit(BLangReturn returnNode) { if (returnNode.exprs.size() == 1 && this.isInvocationExpr(returnNode.exprs.get(0))) { /* a single return expression can be expanded to match a multi-value return */ this.typeChecker.checkExpr(returnNode.exprs.get(0), this.env, this.env.enclInvokable.getReturnParameters().stream() .map(e -> e.getTypeNode().type) .collect(Collectors.toList())); } else { if (returnNode.exprs.size() == 0 && this.env.enclInvokable.getReturnParameters().size() > 0 && !this.env.enclInvokable.getReturnParameters().get(0).name.value.isEmpty()) { returnNode.namedReturnVariables = this.env.enclInvokable.getReturnParameters(); return; } if (this.checkReturnValueCounts(returnNode)) { for (int i = 0; i < returnNode.exprs.size(); i++) { this.typeChecker.checkExpr(returnNode.exprs.get(i), this.env, Arrays.asList(this.env.enclInvokable.getReturnParameters().get(i).getTypeNode().type)); } } } } BType analyzeDef(BLangNode node, SymbolEnv env) { return analyzeNode(node, env); } BType analyzeStmt(BLangStatement stmtNode, SymbolEnv env) { return analyzeNode(stmtNode, env); } BType analyzeNode(BLangNode node, SymbolEnv env) { return analyzeNode(node, env, symTable.noType, null); } public void visit(BLangNext nextNode) { /* ignore */ } public void visit(BLangBreak breakNode) { /* ignore */ } @Override public void visit(BLangThrow throwNode) { this.typeChecker.checkExpr(throwNode.expr, env); if (!types.checkStructEquivalency(throwNode.expr.type, symTable.errStructType)) { dlog.error(throwNode.expr.pos, DiagnosticCode.INCOMPATIBLE_TYPES, symTable.errStructType, throwNode.expr.type); } } @Override public void visit(BLangTransformer transformerNode) { SymbolEnv transformerEnv = SymbolEnv.createTransformerEnv(transformerNode, transformerNode.symbol.scope, env); transformerNode.annAttachments.forEach(annotationAttachment -> { annotationAttachment.attachmentPoint = new BLangAnnotationAttachmentPoint( BLangAnnotationAttachmentPoint.AttachmentPoint.TRANSFORMER, null); this.analyzeDef(annotationAttachment, transformerEnv); }); transformerNode.docAttachments.forEach(doc -> analyzeDef(doc, transformerEnv)); validateTransformerMappingType(transformerNode.source); validateTransformerMappingType(transformerNode.retParams.get(0)); analyzeStmt(transformerNode.body, transformerEnv); int returnCount = transformerNode.retParams.size(); if (returnCount == 0) { dlog.error(transformerNode.pos, DiagnosticCode.TRANSFORMER_MUST_HAVE_OUTPUT); } else if (returnCount > 1) { dlog.error(transformerNode.pos, DiagnosticCode.TOO_MANY_OUTPUTS_FOR_TRANSFORMER, 1, returnCount); } this.processWorkers(transformerNode, transformerEnv); } BType analyzeNode(BLangNode node, SymbolEnv env, BType expType, DiagnosticCode diagCode) { SymbolEnv prevEnv = this.env; BType preExpType = this.expType; DiagnosticCode preDiagCode = this.diagCode; this.env = env; this.expType = expType; this.diagCode = diagCode; node.accept(this); this.env = prevEnv; 
this.expType = preExpType; this.diagCode = preDiagCode; return resType; } private void handleForeachVariables(BLangForeach foreachStmt, List<BType> varTypes, SymbolEnv env) { for (int i = 0; i < foreachStmt.varRefs.size(); i++) { BLangExpression varRef = foreachStmt.varRefs.get(i); if (varRef.getKind() != NodeKind.SIMPLE_VARIABLE_REF) { dlog.error(varRef.pos, DiagnosticCode.INVALID_VARIABLE_ASSIGNMENT, varRef); continue; } BLangSimpleVarRef simpleVarRef = (BLangSimpleVarRef) varRef; simpleVarRef.lhsVar = true; Name varName = names.fromIdNode(simpleVarRef.variableName); if (varName == Names.IGNORE) { simpleVarRef.type = this.symTable.noType; typeChecker.checkExpr(simpleVarRef, env); continue; } BSymbol symbol = symResolver.lookupSymbol(env, varName, SymTag.VARIABLE); if (symbol == symTable.notFoundSymbol) { symbolEnter.defineVarSymbol(simpleVarRef.pos, Collections.emptySet(), varTypes.get(i), varName, env); typeChecker.checkExpr(simpleVarRef, env); } else { dlog.error(simpleVarRef.pos, DiagnosticCode.REDECLARED_SYMBOL, varName); } } } private void handleAssignNodeWithVar(BLangAssignment assignNode) { int ignoredCount = 0; int createdSymbolCount = 0; List<Name> newVariables = new ArrayList<Name>(); List<BType> expTypes = new ArrayList<>(); for (int i = 0; i < assignNode.varRefs.size(); i++) { BLangExpression varRef = assignNode.varRefs.get(i); if (varRef.getKind() != NodeKind.SIMPLE_VARIABLE_REF) { dlog.error(varRef.pos, DiagnosticCode.INVALID_VARIABLE_ASSIGNMENT, varRef); expTypes.add(symTable.errType); continue; } BLangSimpleVarRef simpleVarRef = (BLangSimpleVarRef) varRef; ((BLangVariableReference) varRef).lhsVar = true; Name varName = names.fromIdNode(simpleVarRef.variableName); if (varName == Names.IGNORE) { ignoredCount++; simpleVarRef.type = this.symTable.noType; expTypes.add(symTable.noType); typeChecker.checkExpr(simpleVarRef, env); continue; } BSymbol symbol = symResolver.lookupSymbol(env, varName, SymTag.VARIABLE); if (symbol == symTable.notFoundSymbol) { createdSymbolCount++; newVariables.add(varName); expTypes.add(symTable.noType); } else { expTypes.add(symbol.type); } } if (ignoredCount == assignNode.varRefs.size() || createdSymbolCount == 0) { dlog.error(assignNode.pos, DiagnosticCode.NO_NEW_VARIABLES_VAR_ASSIGNMENT); } final List<BType> rhsTypes = typeChecker.checkExpr(assignNode.expr, this.env, expTypes); for (int i = 0; i < assignNode.varRefs.size(); i++) { BLangExpression varRef = assignNode.varRefs.get(i); if (varRef.getKind() != NodeKind.SIMPLE_VARIABLE_REF) { continue; } BType actualType = rhsTypes.get(i); BLangSimpleVarRef simpleVarRef = (BLangSimpleVarRef) varRef; Name varName = names.fromIdNode(simpleVarRef.variableName); if (newVariables.contains(varName)) { this.symbolEnter.defineVarSymbol(simpleVarRef.pos, Collections.emptySet(), actualType, varName, env); } typeChecker.checkExpr(simpleVarRef, env); } } private void checkRetryStmtValidity(BLangExpression retryCountExpr) { boolean error = true; NodeKind retryKind = retryCountExpr.getKind(); if (retryKind == NodeKind.LITERAL) { if (retryCountExpr.type.tag == TypeTags.INT) { int retryCount = Integer.parseInt(((BLangLiteral) retryCountExpr).getValue().toString()); if (retryCount >= 0) { error = false; } } } else if (retryKind == NodeKind.SIMPLE_VARIABLE_REF) { if (((BLangSimpleVarRef) retryCountExpr).symbol.flags == Flags.CONST) { if (((BLangSimpleVarRef) retryCountExpr).symbol.type.tag == TypeTags.INT) { error = false; } } } if (error) { this.dlog.error(retryCountExpr.pos, DiagnosticCode.INVALID_RETRY_COUNT); } } 
private void validateTransformerMappingType(BLangVariable param) { BType type = param.type; if (types.isValueType(type) || (type instanceof BBuiltInRefType) || type.tag == TypeTags.STRUCT) { return; } dlog.error(param.pos, DiagnosticCode.TRANSFORMER_UNSUPPORTED_TYPES, type); } }
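The `handleAssignNodeWithVar` routine in the snapshot above rejects a `var` multi-assignment when every target is the ignore variable, or when no target introduces a new symbol. A minimal standalone sketch of that counting rule over a toy scope model (the names below are illustrative, not the compiler's actual API):

```java
import java.util.List;
import java.util.Set;

public class VarAssignCheck {
    // Mirrors the rule in handleAssignNodeWithVar: a `var` assignment is
    // invalid when every target is `_` (ignored) or no target is new.
    static boolean isValidVarAssignment(List<String> targets, Set<String> existing) {
        int ignored = 0;
        int created = 0;
        for (String name : targets) {
            if (name.equals("_")) {
                ignored++;
            } else if (!existing.contains(name)) {
                created++;
            }
        }
        return ignored < targets.size() && created > 0;
    }

    public static void main(String[] args) {
        Set<String> scope = Set.of("x");
        System.out.println(isValidVarAssignment(List.of("x", "y"), scope)); // true: y is new
        System.out.println(isValidVarAssignment(List.of("x"), scope));      // false: nothing new
        System.out.println(isValidVarAssignment(List.of("_", "_"), scope)); // false: all ignored
    }
}
```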
But it only does it if the input `isFinished()`, so that should be a non-issue.
public boolean processInput() throws Exception { initializeNumRecordsIn(); StreamElement recordOrMark = input.pollNextNullable(); if (recordOrMark != null) { int channel = input.getLastChannel(); checkState(channel != StreamTaskInput.UNSPECIFIED); processElement(recordOrMark, channel); } checkFinished(); return recordOrMark != null; }
checkFinished();
public boolean processInput() throws Exception { initializeNumRecordsIn(); StreamElement recordOrMark = input.pollNextNullable(); if (recordOrMark != null) { int channel = input.getLastChannel(); checkState(channel != StreamTaskInput.UNSPECIFIED); processElement(recordOrMark, channel); } checkFinished(); return recordOrMark != null; }
class StreamOneInputProcessor<IN> implements StreamInputProcessor { private static final Logger LOG = LoggerFactory.getLogger(StreamOneInputProcessor.class); private final StreamTaskInput input; private final Object lock; private final OperatorChain<?, ?> operatorChain; /** Valve that controls how watermarks and stream statuses are forwarded. */ private StatusWatermarkValve statusWatermarkValve; private final StreamStatusMaintainer streamStatusMaintainer; private final OneInputStreamOperator<IN, ?> streamOperator; private final WatermarkGauge watermarkGauge; private Counter numRecordsIn; @SuppressWarnings("unchecked") public StreamOneInputProcessor( InputGate[] inputGates, TypeSerializer<IN> inputSerializer, StreamTask<?, ?> checkpointedTask, CheckpointingMode checkpointMode, Object lock, IOManager ioManager, Configuration taskManagerConfig, StreamStatusMaintainer streamStatusMaintainer, OneInputStreamOperator<IN, ?> streamOperator, TaskIOMetricGroup metrics, WatermarkGauge watermarkGauge, String taskName, OperatorChain<?, ?> operatorChain) throws IOException { InputGate inputGate = InputGateUtil.createInputGate(inputGates); CheckpointBarrierHandler barrierHandler = InputProcessorUtil.createCheckpointBarrierHandler( checkpointedTask, checkpointMode, ioManager, inputGate, taskManagerConfig, taskName); this.input = new StreamTaskNetworkInput(barrierHandler, inputSerializer, ioManager, 0); this.lock = checkNotNull(lock); this.streamStatusMaintainer = checkNotNull(streamStatusMaintainer); this.streamOperator = checkNotNull(streamOperator); this.statusWatermarkValve = new StatusWatermarkValve( inputGate.getNumberOfInputChannels(), new ForwardingValveOutputHandler(streamOperator, lock)); this.watermarkGauge = watermarkGauge; metrics.gauge("checkpointAlignmentTime", barrierHandler::getAlignmentDurationNanos); this.operatorChain = checkNotNull(operatorChain); } @Override public boolean isFinished() { return input.isFinished(); } @Override public CompletableFuture<?> isAvailable() { return input.isAvailable(); } @Override private void processElement(StreamElement recordOrMark, int channel) throws Exception { if (recordOrMark.isRecord()) { StreamRecord<IN> record = recordOrMark.asRecord(); synchronized (lock) { numRecordsIn.inc(); streamOperator.setKeyContextElement1(record); streamOperator.processElement(record); } } else if (recordOrMark.isWatermark()) { statusWatermarkValve.inputWatermark(recordOrMark.asWatermark(), channel); } else if (recordOrMark.isStreamStatus()) { statusWatermarkValve.inputStreamStatus(recordOrMark.asStreamStatus(), channel); } else if (recordOrMark.isLatencyMarker()) { synchronized (lock) { streamOperator.processLatencyMarker(recordOrMark.asLatencyMarker()); } } else { throw new UnsupportedOperationException("Unknown type of StreamElement"); } } private void checkFinished() throws Exception { if (input.isFinished()) { synchronized (lock) { operatorChain.endInput(1); } } } private void initializeNumRecordsIn() { if (numRecordsIn == null) { try { numRecordsIn = ((OperatorMetricGroup) streamOperator.getMetricGroup()).getIOMetricGroup().getNumRecordsInCounter(); } catch (Exception e) { LOG.warn("An exception occurred during the metrics setup.", e); numRecordsIn = new SimpleCounter(); } } } @Override public void close() throws IOException { input.close(); } private class ForwardingValveOutputHandler implements StatusWatermarkValve.ValveOutputHandler { private final OneInputStreamOperator<IN, ?> operator; private final Object lock; private ForwardingValveOutputHandler(final 
OneInputStreamOperator<IN, ?> operator, final Object lock) { this.operator = checkNotNull(operator); this.lock = checkNotNull(lock); } @Override public void handleWatermark(Watermark watermark) { try { synchronized (lock) { watermarkGauge.setCurrentWatermark(watermark.getTimestamp()); operator.processWatermark(watermark); } } catch (Exception e) { throw new RuntimeException("Exception occurred while processing valve output watermark: ", e); } } @SuppressWarnings("unchecked") @Override public void handleStreamStatus(StreamStatus streamStatus) { try { synchronized (lock) { streamStatusMaintainer.toggleStreamStatus(streamStatus); } } catch (Exception e) { throw new RuntimeException("Exception occurred while processing valve output stream status: ", e); } } } }
class StreamOneInputProcessor<IN> implements StreamInputProcessor { private static final Logger LOG = LoggerFactory.getLogger(StreamOneInputProcessor.class); private final StreamTaskInput input; private final Object lock; private final OperatorChain<?, ?> operatorChain; /** Valve that controls how watermarks and stream statuses are forwarded. */ private StatusWatermarkValve statusWatermarkValve; private final StreamStatusMaintainer streamStatusMaintainer; private final OneInputStreamOperator<IN, ?> streamOperator; private final WatermarkGauge watermarkGauge; private Counter numRecordsIn; @SuppressWarnings("unchecked") public StreamOneInputProcessor( InputGate[] inputGates, TypeSerializer<IN> inputSerializer, StreamTask<?, ?> checkpointedTask, CheckpointingMode checkpointMode, Object lock, IOManager ioManager, Configuration taskManagerConfig, StreamStatusMaintainer streamStatusMaintainer, OneInputStreamOperator<IN, ?> streamOperator, TaskIOMetricGroup metrics, WatermarkGauge watermarkGauge, String taskName, OperatorChain<?, ?> operatorChain) throws IOException { InputGate inputGate = InputGateUtil.createInputGate(inputGates); CheckpointedInputGate barrierHandler = InputProcessorUtil.createCheckpointedInputGate( checkpointedTask, checkpointMode, ioManager, inputGate, taskManagerConfig, taskName); this.input = new StreamTaskNetworkInput(barrierHandler, inputSerializer, ioManager, 0); this.lock = checkNotNull(lock); this.streamStatusMaintainer = checkNotNull(streamStatusMaintainer); this.streamOperator = checkNotNull(streamOperator); this.statusWatermarkValve = new StatusWatermarkValve( inputGate.getNumberOfInputChannels(), new ForwardingValveOutputHandler(streamOperator, lock)); this.watermarkGauge = watermarkGauge; metrics.gauge("checkpointAlignmentTime", barrierHandler::getAlignmentDurationNanos); this.operatorChain = checkNotNull(operatorChain); } @Override public boolean isFinished() { return input.isFinished(); } @Override public CompletableFuture<?> isAvailable() { return input.isAvailable(); } @Override private void processElement(StreamElement recordOrMark, int channel) throws Exception { if (recordOrMark.isRecord()) { StreamRecord<IN> record = recordOrMark.asRecord(); synchronized (lock) { numRecordsIn.inc(); streamOperator.setKeyContextElement1(record); streamOperator.processElement(record); } } else if (recordOrMark.isWatermark()) { statusWatermarkValve.inputWatermark(recordOrMark.asWatermark(), channel); } else if (recordOrMark.isStreamStatus()) { statusWatermarkValve.inputStreamStatus(recordOrMark.asStreamStatus(), channel); } else if (recordOrMark.isLatencyMarker()) { synchronized (lock) { streamOperator.processLatencyMarker(recordOrMark.asLatencyMarker()); } } else { throw new UnsupportedOperationException("Unknown type of StreamElement"); } } private void checkFinished() throws Exception { if (input.isFinished()) { synchronized (lock) { operatorChain.endInput(1); } } } private void initializeNumRecordsIn() { if (numRecordsIn == null) { try { numRecordsIn = ((OperatorMetricGroup) streamOperator.getMetricGroup()).getIOMetricGroup().getNumRecordsInCounter(); } catch (Exception e) { LOG.warn("An exception occurred during the metrics setup.", e); numRecordsIn = new SimpleCounter(); } } } @Override public void close() throws IOException { input.close(); } private class ForwardingValveOutputHandler implements StatusWatermarkValve.ValveOutputHandler { private final OneInputStreamOperator<IN, ?> operator; private final Object lock; private ForwardingValveOutputHandler(final 
OneInputStreamOperator<IN, ?> operator, final Object lock) { this.operator = checkNotNull(operator); this.lock = checkNotNull(lock); } @Override public void handleWatermark(Watermark watermark) { try { synchronized (lock) { watermarkGauge.setCurrentWatermark(watermark.getTimestamp()); operator.processWatermark(watermark); } } catch (Exception e) { throw new RuntimeException("Exception occurred while processing valve output watermark: ", e); } } @SuppressWarnings("unchecked") @Override public void handleStreamStatus(StreamStatus streamStatus) { try { synchronized (lock) { streamStatusMaintainer.toggleStreamStatus(streamStatus); } } catch (Exception e) { throw new RuntimeException("Exception occurred while processing valve output stream status: ", e); } } } }
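The comment on this row argues that calling `checkFinished()` unconditionally after each poll is harmless, because `endInput(1)` only fires once `input.isFinished()` reports true. A minimal sketch of that guard pattern, with an explicit once-only flag added for illustration (the `Input` interface below is a stand-in, not Flink's actual API):

```java
import java.util.Iterator;
import java.util.List;

public class PollLoopSketch {
    // Stand-in for the task input; not Flink's real StreamTaskInput.
    interface Input {
        String pollNextNullable();
        boolean isFinished();
    }

    private boolean inputEnded = false;

    boolean processInput(Input input) {
        String record = input.pollNextNullable();
        if (record != null) {
            System.out.println("processed: " + record);
        }
        checkFinished(input); // safe to call on every iteration
        return record != null;
    }

    private void checkFinished(Input input) {
        // Only acts when the input reports finished; the flag makes the
        // end-of-input notification fire exactly once in this sketch.
        if (input.isFinished() && !inputEnded) {
            inputEnded = true;
            System.out.println("endInput(1)");
        }
    }

    public static void main(String[] args) {
        Iterator<String> data = List.of("a", "b").iterator();
        Input input = new Input() {
            public String pollNextNullable() { return data.hasNext() ? data.next() : null; }
            public boolean isFinished() { return !data.hasNext(); }
        };
        PollLoopSketch loop = new PollLoopSketch();
        while (loop.processInput(input)) {
            // drain until the input is exhausted
        }
    }
}
```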
For the extension tests, I wonder if we would have to dump the full test deployment classpath, as we might add some deployment extensions for testing.
protected void doExecute() throws MojoExecutionException, MojoFailureException { final String lifecyclePhase = mojoExecution.getLifecyclePhase(); if (mode == null) { if (lifecyclePhase == null) { mode = "NORMAL"; } else { mode = lifecyclePhase.contains("test") ? "TEST" : "NORMAL"; } } final LaunchMode launchMode = LaunchMode.valueOf(mode); if (getLog().isDebugEnabled()) { getLog().debug("Bootstrapping Quarkus application in mode " + launchMode); } Path compareFile; if (this.recordedBuildConfigFile == null) { compareFile = recordedBuildConfigDirectory.toPath() .resolve("quarkus-" + launchMode.getDefaultProfile() + "-config-dump"); } else if (this.recordedBuildConfigFile.isAbsolute()) { compareFile = this.recordedBuildConfigFile.toPath(); } else { compareFile = recordedBuildConfigDirectory.toPath().resolve(this.recordedBuildConfigFile.toPath()); } final boolean prevConfigExists = Files.exists(compareFile); if (!prevConfigExists && !dumpCurrentWhenRecordedUnavailable && !dumpDependencies) { getLog().info("Config dump from the previous build does not exist at " + compareFile); return; } CuratedApplication curatedApplication = null; QuarkusClassLoader deploymentClassLoader = null; final ClassLoader originalCl = Thread.currentThread().getContextClassLoader(); final boolean clearPackageTypeSystemProperty = setPackageTypeSystemPropertyIfNativeProfileEnabled(); try { curatedApplication = bootstrapApplication(launchMode); if (prevConfigExists || dumpCurrentWhenRecordedUnavailable) { final Path targetFile = getOutputFile(outputFile, launchMode.getDefaultProfile(), "-config-check"); Properties compareProps = null; if (prevConfigExists) { compareProps = new Properties(); try (BufferedReader reader = Files.newBufferedReader(compareFile)) { compareProps.load(reader); } catch (IOException e) { throw new RuntimeException("Failed to read " + compareFile, e); } } deploymentClassLoader = curatedApplication.createDeploymentClassLoader(); Thread.currentThread().setContextClassLoader(deploymentClassLoader); final Class<?> codeGenerator = deploymentClassLoader.loadClass("io.quarkus.deployment.CodeGenerator"); final Method dumpConfig = codeGenerator.getMethod("dumpCurrentConfigValues", ApplicationModel.class, String.class, Properties.class, QuarkusClassLoader.class, Properties.class, Path.class); dumpConfig.invoke(null, curatedApplication.getApplicationModel(), launchMode.name(), getBuildSystemProperties(true), deploymentClassLoader, compareProps, targetFile); } if (dumpDependencies) { final List<String> deps = new ArrayList<>(); for (var d : curatedApplication.getApplicationModel().getDependencies(DependencyFlags.DEPLOYMENT_CP)) { var adler32 = new Adler32(); updateChecksum(adler32, d.getResolvedPaths()); deps.add(d.toGACTVString() + " " + adler32.getValue()); } Collections.sort(deps); final Path targetFile = getOutputFile(dependenciesFile, launchMode.getDefaultProfile(), "-dependency-checksums.txt"); Files.createDirectories(targetFile.getParent()); try (BufferedWriter writer = Files.newBufferedWriter(targetFile)) { for (var s : deps) { writer.write(s); writer.newLine(); } } } } catch (Exception any) { throw new MojoExecutionException("Failed to bootstrap Quarkus application", any); } finally { if (clearPackageTypeSystemProperty) { System.clearProperty(PACKAGE_TYPE_PROP); } Thread.currentThread().setContextClassLoader(originalCl); if (deploymentClassLoader != null) { deploymentClassLoader.close(); } } }
final List<String> deps = new ArrayList<>();
protected void doExecute() throws MojoExecutionException, MojoFailureException { final String lifecyclePhase = mojoExecution.getLifecyclePhase(); if (mode == null) { if (lifecyclePhase == null) { mode = "NORMAL"; } else { mode = lifecyclePhase.contains("test") ? "TEST" : "NORMAL"; } } final LaunchMode launchMode = LaunchMode.valueOf(mode); if (getLog().isDebugEnabled()) { getLog().debug("Bootstrapping Quarkus application in mode " + launchMode); } Path compareFile; if (this.recordedBuildConfigFile == null) { compareFile = recordedBuildConfigDirectory.toPath() .resolve("quarkus-" + launchMode.getDefaultProfile() + "-config-dump"); } else if (this.recordedBuildConfigFile.isAbsolute()) { compareFile = this.recordedBuildConfigFile.toPath(); } else { compareFile = recordedBuildConfigDirectory.toPath().resolve(this.recordedBuildConfigFile.toPath()); } final boolean prevConfigExists = Files.exists(compareFile); if (!prevConfigExists && !dumpCurrentWhenRecordedUnavailable && !dumpDependencies) { getLog().info("Config dump from the previous build does not exist at " + compareFile); return; } CuratedApplication curatedApplication = null; QuarkusClassLoader deploymentClassLoader = null; final ClassLoader originalCl = Thread.currentThread().getContextClassLoader(); final boolean clearPackageTypeSystemProperty = setPackageTypeSystemPropertyIfNativeProfileEnabled(); try { curatedApplication = bootstrapApplication(launchMode); if (prevConfigExists || dumpCurrentWhenRecordedUnavailable) { final Path targetFile = getOutputFile(outputFile, launchMode.getDefaultProfile(), "-config-check"); Properties compareProps = new Properties(); if (prevConfigExists) { try (BufferedReader reader = Files.newBufferedReader(compareFile)) { compareProps.load(reader); } catch (IOException e) { throw new RuntimeException("Failed to read " + compareFile, e); } } deploymentClassLoader = curatedApplication.createDeploymentClassLoader(); Thread.currentThread().setContextClassLoader(deploymentClassLoader); final Class<?> codeGenerator = deploymentClassLoader.loadClass("io.quarkus.deployment.CodeGenerator"); final Method dumpConfig = codeGenerator.getMethod("dumpCurrentConfigValues", ApplicationModel.class, String.class, Properties.class, QuarkusClassLoader.class, Properties.class, Path.class); dumpConfig.invoke(null, curatedApplication.getApplicationModel(), launchMode.name(), getBuildSystemProperties(true), deploymentClassLoader, compareProps, targetFile); } if (dumpDependencies) { final List<String> deps = new ArrayList<>(); for (var d : curatedApplication.getApplicationModel().getDependencies(DependencyFlags.DEPLOYMENT_CP)) { StringBuilder entry = new StringBuilder(d.toGACTVString()); if (d.isSnapshot()) { var adler32 = new Adler32(); updateChecksum(adler32, d.getResolvedPaths()); entry.append(" ").append(adler32.getValue()); } deps.add(entry.toString()); } Collections.sort(deps); final Path targetFile = getOutputFile(dependenciesFile, launchMode.getDefaultProfile(), "-dependency-checksums.txt"); Files.createDirectories(targetFile.getParent()); try (BufferedWriter writer = Files.newBufferedWriter(targetFile)) { for (var s : deps) { writer.write(s); writer.newLine(); } } } } catch (Exception any) { throw new MojoExecutionException("Failed to bootstrap Quarkus application", any); } finally { if (clearPackageTypeSystemProperty) { System.clearProperty(PACKAGE_TYPE_PROP); } Thread.currentThread().setContextClassLoader(originalCl); if (deploymentClassLoader != null) { deploymentClassLoader.close(); } } }
class TrackConfigChangesMojo extends QuarkusBootstrapMojo { /** * Skip the execution of this mojo */ @Parameter(defaultValue = "false", property = "quarkus.track-config-changes.skip") boolean skip = false; @Parameter(property = "launchMode") String mode; @Parameter(property = "quarkus.track-config-changes.outputDirectory", defaultValue = "${project.build.directory}") File outputDirectory; @Parameter(property = "quarkus.track-config-changes.outputFile", required = false) File outputFile; @Parameter(property = "quarkus.recorded-build-config.directory", defaultValue = "${basedir}/.quarkus") File recordedBuildConfigDirectory; @Parameter(property = "quarkus.recorded-build-config.file", required = false) File recordedBuildConfigFile; /** * Whether to dump the current build configuration in case the configuration from the previous build isn't found */ @Parameter(defaultValue = "false", property = "quarkus.track-config-changes.dump-current-when-recorded-unavailable") boolean dumpCurrentWhenRecordedUnavailable; /** * Whether to dump Quarkus application dependencies along with their checksums */ @Parameter(defaultValue = "true", property = "quarkus.track-config-changes.dump-dependencies") boolean dumpDependencies; /** * Dependency dump file */ @Parameter(property = "quarkus.track-config-changes.dependenciesFile") File dependenciesFile; @Override protected boolean beforeExecute() throws MojoExecutionException, MojoFailureException { if (skip) { getLog().info("Skipping config dump"); return false; } return true; } @Override private Path getOutputFile(File outputFile, String profile, String fileNameSuffix) { if (outputFile == null) { return outputDirectory.toPath().resolve("quarkus-" + profile + fileNameSuffix); } if (outputFile.isAbsolute()) { return outputFile.toPath(); } return outputDirectory.toPath().resolve(outputFile.toPath()); } private static void updateChecksum(Checksum checksum, Iterable<Path> pc) throws IOException { for (var path : sort(pc)) { if (Files.isDirectory(path)) { try (DirectoryStream<Path> stream = Files.newDirectoryStream(path)) { updateChecksum(checksum, stream); } } else { checksum.update(Files.readAllBytes(path)); } } } private static Iterable<Path> sort(Iterable<Path> original) { var i = original.iterator(); if (!i.hasNext()) { return List.of(); } var o = i.next(); if (!i.hasNext()) { return List.of(o); } final List<Path> sorted = new ArrayList<>(); sorted.add(o); while (i.hasNext()) { sorted.add(i.next()); } Collections.sort(sorted); return sorted; } }
class TrackConfigChangesMojo extends QuarkusBootstrapMojo { /** * Skip the execution of this mojo */ @Parameter(defaultValue = "false", property = "quarkus.track-config-changes.skip") boolean skip = false; @Parameter(property = "launchMode") String mode; @Parameter(property = "quarkus.track-config-changes.outputDirectory", defaultValue = "${project.build.directory}") File outputDirectory; @Parameter(property = "quarkus.track-config-changes.outputFile", required = false) File outputFile; @Parameter(property = "quarkus.recorded-build-config.directory", defaultValue = "${basedir}/.quarkus") File recordedBuildConfigDirectory; @Parameter(property = "quarkus.recorded-build-config.file", required = false) File recordedBuildConfigFile; /** * Whether to dump the current build configuration in case the configuration from the previous build isn't found */ @Parameter(defaultValue = "false", property = "quarkus.track-config-changes.dump-current-when-recorded-unavailable") boolean dumpCurrentWhenRecordedUnavailable; /** * Whether to dump Quarkus application dependencies along with their checksums */ @Parameter(defaultValue = "true", property = "quarkus.track-config-changes.dump-dependencies") boolean dumpDependencies; /** * Dependency dump file */ @Parameter(property = "quarkus.track-config-changes.dependencies-file") File dependenciesFile; @Override protected boolean beforeExecute() throws MojoExecutionException, MojoFailureException { if (skip) { getLog().info("Skipping config dump"); return false; } return true; } @Override private Path getOutputFile(File outputFile, String profile, String fileNameSuffix) { if (outputFile == null) { return outputDirectory.toPath().resolve("quarkus-" + profile + fileNameSuffix); } if (outputFile.isAbsolute()) { return outputFile.toPath(); } return outputDirectory.toPath().resolve(outputFile.toPath()); } private static void updateChecksum(Checksum checksum, Iterable<Path> pc) throws IOException { for (var path : sort(pc)) { if (Files.isDirectory(path)) { try (DirectoryStream<Path> stream = Files.newDirectoryStream(path)) { updateChecksum(checksum, stream); } } else { checksum.update(Files.readAllBytes(path)); } } } private static Iterable<Path> sort(Iterable<Path> original) { var i = original.iterator(); if (!i.hasNext()) { return List.of(); } var o = i.next(); if (!i.hasNext()) { return List.of(o); } final List<Path> sorted = new ArrayList<>(); sorted.add(o); while (i.hasNext()) { sorted.add(i.next()); } Collections.sort(sorted); return sorted; } }
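The updated mojo in this row computes an Adler32 checksum only for snapshot dependencies, on the assumption that released artifacts are immutable. A self-contained sketch of the checksum-over-file-bytes primitive it relies on (`java.util.zip.Adler32` is the real JDK class; the temp-file harness is illustrative):

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.zip.Adler32;

public class DependencyChecksum {
    // The same primitive the mojo uses: an Adler32 checksum over a file's
    // bytes, so a changed snapshot artifact yields a different value.
    static long checksumOf(Path file) throws IOException {
        Adler32 adler32 = new Adler32();
        adler32.update(Files.readAllBytes(file));
        return adler32.getValue();
    }

    public static void main(String[] args) throws IOException {
        Path tmp = Files.createTempFile("dep", ".jar");
        Files.write(tmp, new byte[] {1, 2, 3});
        System.out.println("checksum=" + checksumOf(tmp));
    }
}
```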
```suggestion
if (!extracted.isPresent()) {
```
public Rule build() { return logicalFilter().when(filter -> !filter.isSingleTableExpressionExtracted()).then(filter -> { List<Expression> conjuncts = ExpressionUtils.extractConjunction(filter.getPredicates()) .stream().collect(Collectors.toList()); List<Expression> redundants = Lists.newArrayList(); for (Expression conjunct : conjuncts) { List<Expression> disjuncts = ExpressionUtils.extractDisjunction(conjunct); if (disjuncts.size() == 1) { continue; } Expression first = disjuncts.get(0); Set<SlotReference> slots = first.getInputSlots() .stream() .map(SlotReference.class::cast) .collect(Collectors.toSet()); Set<String> qualifiers = slots.stream() .map(slot -> getSlotQualifierAsString(slot)) .collect(Collectors.toSet()); for (String qualifier : qualifiers) { List<Expression> extract4all = Lists.newArrayList(); boolean success = true; for (Expression expr : ExpressionUtils.extractDisjunction(conjunct)) { Optional<Expression> extracted = extractSingleTableExpression(expr, qualifier); if (! extracted.isPresent()) { success = false; break; } else { extract4all.add(extracted.get()); } } if (success) { redundants.add(ExpressionUtils.or(extract4all)); } } } if (redundants.isEmpty()) { return new LogicalFilter<>(filter.getPredicates(), true, filter.child()); } else { Expression newPredicate = ExpressionUtils.and(filter.getPredicates(), ExpressionUtils.and(redundants)); return new LogicalFilter<>(newPredicate, true, filter.child()); } }).toRule(RuleType.EXTRACT_SINGLE_TABLE_EXPRESSION_FROM_DISJUNCTION); }
if (! extracted.isPresent()) {
public Rule build() { return logicalFilter().whenNot(LogicalFilter::isSingleTableExpressionExtracted).then(filter -> { List<Expression> conjuncts = ExpressionUtils.extractConjunction(filter.getPredicates()) .stream().collect(Collectors.toList()); List<Expression> redundants = Lists.newArrayList(); for (Expression conjunct : conjuncts) { List<Expression> disjuncts = ExpressionUtils.extractDisjunction(conjunct); if (disjuncts.size() == 1) { continue; } Expression first = disjuncts.get(0); Set<String> qualifiers = first.getInputSlots() .stream() .map(SlotReference.class::cast) .map(this::getSlotQualifierAsString) .collect(Collectors.toSet()); for (String qualifier : qualifiers) { List<Expression> extractForAll = Lists.newArrayList(); boolean success = true; for (Expression expr : ExpressionUtils.extractDisjunction(conjunct)) { Optional<Expression> extracted = extractSingleTableExpression(expr, qualifier); if (!extracted.isPresent()) { success = false; break; } else { extractForAll.add(extracted.get()); } } if (success) { redundants.add(ExpressionUtils.or(extractForAll)); } } } if (redundants.isEmpty()) { return new LogicalFilter<>(filter.getPredicates(), true, filter.child()); } else { Expression newPredicate = ExpressionUtils.and(filter.getPredicates(), ExpressionUtils.and(redundants)); return new LogicalFilter<>(newPredicate, true, filter.child()); } }).toRule(RuleType.EXTRACT_SINGLE_TABLE_EXPRESSION_FROM_DISJUNCTION); }
class ExtractSingleTableExpressionFromDisjunction extends OneRewriteRuleFactory { @Override private String getSlotQualifierAsString(SlotReference slotReference) { StringBuilder builder = new StringBuilder(); for (String q : slotReference.getQualifier()) { builder.append(q).append('.'); } return builder.toString(); } private Optional<Expression> extractSingleTableExpression(Expression expr, String qualifier) { List<Expression> output = Lists.newArrayList(); List<Expression> conjuncts = ExpressionUtils.extractConjunction(expr); for (Expression conjunct : conjuncts) { if (isSingleTableExpression(conjunct, qualifier)) { output.add(conjunct); } } if (output.isEmpty()) { return Optional.empty(); } else { return Optional.of(ExpressionUtils.and(output)); } } private boolean isSingleTableExpression(Expression expr, String qualifier) { for (Slot slot : expr.getInputSlots()) { String slotQualifier = getSlotQualifierAsString((SlotReference) slot); if (!slotQualifier.equals(qualifier)) { return false; } } return true; } }
class ExtractSingleTableExpressionFromDisjunction extends OneRewriteRuleFactory { @Override private String getSlotQualifierAsString(SlotReference slotReference) { StringBuilder builder = new StringBuilder(); for (String q : slotReference.getQualifier()) { builder.append(q).append('.'); } return builder.toString(); } private Optional<Expression> extractSingleTableExpression(Expression expr, String qualifier) { List<Expression> output = Lists.newArrayList(); List<Expression> conjuncts = ExpressionUtils.extractConjunction(expr); for (Expression conjunct : conjuncts) { if (isSingleTableExpression(conjunct, qualifier)) { output.add(conjunct); } } if (output.isEmpty()) { return Optional.empty(); } else { return Optional.of(ExpressionUtils.and(output)); } } private boolean isSingleTableExpression(Expression expr, String qualifier) { for (Slot slot : expr.getInputSlots()) { String slotQualifier = getSlotQualifierAsString((SlotReference) slot); if (!slotQualifier.equals(qualifier)) { return false; } } return true; } }
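The rule in this row derives a redundant single-table predicate from a disjunction: every disjunct must contribute at least one predicate on the same table, otherwise nothing can be extracted. A toy worked example using a simplified predicate model rather than Nereids' `Expression` tree: from `(t1.a = 1 AND t2.b = 2) OR (t1.a = 3 AND t2.b = 4)` it derives `t1.a = 1 OR t1.a = 3`, which can then be pushed below the join.

```java
import java.util.ArrayList;
import java.util.List;

public class DisjunctionExtraction {
    // Toy predicate: a conjunct tagged with the single table it references.
    record Pred(String table, String text) {}

    // Keep only the predicates of one disjunct that reference `table`.
    static List<Pred> singleTablePart(List<Pred> disjunct, String table) {
        List<Pred> out = new ArrayList<>();
        for (Pred p : disjunct) {
            if (p.table().equals(table)) {
                out.add(p);
            }
        }
        return out; // empty means extraction fails for this disjunct
    }

    public static void main(String[] args) {
        // (t1.a = 1 AND t2.b = 2) OR (t1.a = 3 AND t2.b = 4)
        List<List<Pred>> disjuncts = List.of(
                List.of(new Pred("t1", "t1.a = 1"), new Pred("t2", "t2.b = 2")),
                List.of(new Pred("t1", "t1.a = 3"), new Pred("t2", "t2.b = 4")));

        List<String> parts = new ArrayList<>();
        for (List<Pred> d : disjuncts) {
            List<Pred> t1Part = singleTablePart(d, "t1");
            if (t1Part.isEmpty()) {
                return; // a disjunct without a t1 predicate blocks extraction
            }
            parts.add(t1Part.get(0).text());
        }
        // Every disjunct contributed, so the redundant predicate is sound.
        System.out.println("redundant: (" + String.join(") OR (", parts) + ")");
    }
}
```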
Personal preference: I would prefer that to be
```suggestion
reflection.produce(new ReflectiveClassBuildItem(true, true, "com.sun.jndi.ldap.LdapCtxFactory"));
```
void registerForReflection(BuildProducer<ReflectiveClassBuildItem> reflection) { reflection.produce(new ReflectiveClassBuildItem(true, true, QuarkusDirContextFactory.INITIAL_CONTEXT_FACTORY)); reflection.produce(new ReflectiveClassBuildItem(false, false, "com.sun.jndi.dns.DnsContextFactory")); reflection.produce(new ReflectiveClassBuildItem(false, false, "com.sun.jndi.rmi.registry.RegistryContextFactory")); }
reflection.produce(new ReflectiveClassBuildItem(true, true, QuarkusDirContextFactory.INITIAL_CONTEXT_FACTORY));
void registerForReflection(BuildProducer<ReflectiveClassBuildItem> reflection) { reflection.produce(new ReflectiveClassBuildItem(true, true, QuarkusDirContextFactory.INITIAL_CONTEXT_FACTORY)); reflection.produce(new ReflectiveClassBuildItem(false, false, "com.sun.jndi.dns.DnsContextFactory")); reflection.produce(new ReflectiveClassBuildItem(false, false, "com.sun.jndi.rmi.registry.RegistryContextFactory")); }
class ElytronSecurityLdapProcessor { @BuildStep() FeatureBuildItem feature() { return new FeatureBuildItem(Feature.SECURITY_LDAP); } @BuildStep AllowJNDIBuildItem enableJndi() { return new AllowJNDIBuildItem(); } /** * Check to see if a LdapRealmConfig was specified and enabled and create a * {@linkplain org.wildfly.security.auth.realm.ldap.LdapSecurityRealm} */ @BuildStep @Record(ExecutionTime.RUNTIME_INIT) void configureLdapRealmAuthConfig(LdapRecorder recorder, LdapSecurityRealmBuildTimeConfig ldapSecurityRealmBuildTimeConfig, LdapSecurityRealmRuntimeConfig ldapSecurityRealmRuntimeConfig, BuildProducer<SecurityRealmBuildItem> securityRealm, BeanContainerBuildItem beanContainerBuildItem ) throws Exception { if (!ldapSecurityRealmBuildTimeConfig.enabled) { return; } RuntimeValue<SecurityRealm> realm = recorder.createRealm(ldapSecurityRealmRuntimeConfig); securityRealm.produce(new SecurityRealmBuildItem(realm, ldapSecurityRealmBuildTimeConfig.realmName, null)); } @BuildStep ElytronPasswordMarkerBuildItem marker(LdapSecurityRealmBuildTimeConfig ldapSecurityRealmBuildTimeConfig) { if (!ldapSecurityRealmBuildTimeConfig.enabled) { return null; } return new ElytronPasswordMarkerBuildItem(); } @BuildStep }
class ElytronSecurityLdapProcessor { @BuildStep() FeatureBuildItem feature() { return new FeatureBuildItem(Feature.SECURITY_LDAP); } @BuildStep AllowJNDIBuildItem enableJndi() { return new AllowJNDIBuildItem(); } /** * Check to see if a LdapRealmConfig was specified and enabled and create a * {@linkplain org.wildfly.security.auth.realm.ldap.LdapSecurityRealm} */ @BuildStep @Record(ExecutionTime.RUNTIME_INIT) void configureLdapRealmAuthConfig(LdapRecorder recorder, LdapSecurityRealmBuildTimeConfig ldapSecurityRealmBuildTimeConfig, LdapSecurityRealmRuntimeConfig ldapSecurityRealmRuntimeConfig, BuildProducer<SecurityRealmBuildItem> securityRealm, BeanContainerBuildItem beanContainerBuildItem ) throws Exception { if (!ldapSecurityRealmBuildTimeConfig.enabled) { return; } RuntimeValue<SecurityRealm> realm = recorder.createRealm(ldapSecurityRealmRuntimeConfig); securityRealm.produce(new SecurityRealmBuildItem(realm, ldapSecurityRealmBuildTimeConfig.realmName, null)); } @BuildStep ElytronPasswordMarkerBuildItem marker(LdapSecurityRealmBuildTimeConfig ldapSecurityRealmBuildTimeConfig) { if (!ldapSecurityRealmBuildTimeConfig.enabled) { return null; } return new ElytronPasswordMarkerBuildItem(); } @BuildStep }
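The build step in this row registers JNDI context factories for reflection so GraalVM native-image analysis keeps them; the reviewer's suggestion simply inlines the `LdapCtxFactory` class name instead of referencing the `QuarkusDirContextFactory` constant. An illustrative rendering of that shape (the imports and wrapper class are assumptions added for compilability; the `ReflectiveClassBuildItem(boolean, boolean, String...)` constructor is the one the row itself uses):

```java
import io.quarkus.deployment.annotations.BuildProducer;
import io.quarkus.deployment.annotations.BuildStep;
import io.quarkus.deployment.builditem.nativeimage.ReflectiveClassBuildItem;

// Whether to inline "com.sun.jndi.ldap.LdapCtxFactory" or go through a
// constant is purely stylistic; both register the same class for reflection.
class LdapReflectionSketch {
    @BuildStep
    void registerForReflection(BuildProducer<ReflectiveClassBuildItem> reflection) {
        reflection.produce(new ReflectiveClassBuildItem(true, true, "com.sun.jndi.ldap.LdapCtxFactory"));
        reflection.produce(new ReflectiveClassBuildItem(false, false, "com.sun.jndi.dns.DnsContextFactory"));
        reflection.produce(new ReflectiveClassBuildItem(false, false, "com.sun.jndi.rmi.registry.RegistryContextFactory"));
    }
}
```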
```suggestion
"or by adding '%s' or '%s' to your Quarkus configuration",
```
private void configureBaseUrl(RestClientBuilder builder) { Optional<String> propertyOptional = getOptionalProperty(REST_URI_FORMAT, String.class); if (!propertyOptional.isPresent()) { propertyOptional = getOptionalProperty(REST_URL_FORMAT, String.class); } if (((baseUriFromAnnotation == null) || baseUriFromAnnotation.isEmpty()) && !propertyOptional.isPresent()) { throw new IllegalArgumentException( String.format( "Unable to determine the proper baseUrl/baseUri. " + "Consider registering using @RegisterRestClient(baseUri=\"someuri\"), @RegisterRestClient(configKey=\"orkey\"), " + "or by adding '%s' or '%s'to your Quarkus configuration", String.format(REST_URL_FORMAT, propertyPrefix), String.format(REST_URI_FORMAT, propertyPrefix))); } String baseUrl = propertyOptional.orElse(baseUriFromAnnotation); try { builder.baseUrl(new URL(baseUrl)); } catch (MalformedURLException e) { throw new IllegalArgumentException("The value of URL was invalid " + baseUrl, e); } catch (Exception e) { if ("com.oracle.svm.core.jdk.UnsupportedFeatureError".equals(e.getClass().getCanonicalName())) { throw new IllegalArgumentException(baseUrl + " requires SSL support but it is disabled. You probably have set quarkus.ssl.native to false."); } throw e; } }
"or by adding '%s' or '%s'to your Quarkus configuration",
private void configureBaseUrl(RestClientBuilder builder) { Optional<String> propertyOptional = getOptionalProperty(REST_URI_FORMAT, String.class); if (!propertyOptional.isPresent()) { propertyOptional = getOptionalProperty(REST_URL_FORMAT, String.class); } if (((baseUriFromAnnotation == null) || baseUriFromAnnotation.isEmpty()) && !propertyOptional.isPresent()) { throw new IllegalArgumentException( String.format( "Unable to determine the proper baseUrl/baseUri. " + "Consider registering using @RegisterRestClient(baseUri=\"someuri\"), @RegisterRestClient(configKey=\"orkey\"), " + "or by adding '%s' or '%s' to your Quarkus configuration", String.format(REST_URL_FORMAT, propertyPrefix), String.format(REST_URI_FORMAT, propertyPrefix))); } String baseUrl = propertyOptional.orElse(baseUriFromAnnotation); try { builder.baseUrl(new URL(baseUrl)); } catch (MalformedURLException e) { throw new IllegalArgumentException("The value of URL was invalid " + baseUrl, e); } catch (Exception e) { if ("com.oracle.svm.core.jdk.UnsupportedFeatureError".equals(e.getClass().getCanonicalName())) { throw new IllegalArgumentException(baseUrl + " requires SSL support but it is disabled. You probably have set quarkus.ssl.native to false."); } throw e; } }
class RestClientBase { public static final String MP_REST = "mp-rest"; public static final String REST_URL_FORMAT = "%s/" + MP_REST + "/url"; public static final String REST_URI_FORMAT = "%s/" + MP_REST + "/uri"; public static final String REST_CONNECT_TIMEOUT_FORMAT = "%s/" + MP_REST + "/connectTimeout"; public static final String REST_READ_TIMEOUT_FORMAT = "%s/" + MP_REST + "/readTimeout"; public static final String REST_SCOPE_FORMAT = "%s/" + MP_REST + "/scope"; public static final String REST_PROVIDERS = "%s/" + MP_REST + "/providers"; public static final String REST_TRUST_STORE = "%s/" + MP_REST + "/trustStore"; public static final String REST_TRUST_STORE_PASSWORD = "%s/" + MP_REST + "/trustStorePassword"; public static final String REST_TRUST_STORE_TYPE = "%s/" + MP_REST + "/trustStoreType"; public static final String REST_KEY_STORE = "%s/" + MP_REST + "/keyStore"; public static final String REST_KEY_STORE_PASSWORD = "%s/" + MP_REST + "/keyStorePassword"; public static final String REST_KEY_STORE_TYPE = "%s/" + MP_REST + "/keyStoreType"; public static final String REST_HOSTNAME_VERIFIER = "%s/" + MP_REST + "/hostnameVerifier"; private final Class<?> proxyType; private final String baseUriFromAnnotation; private final String propertyPrefix; private final Config config; public RestClientBase(Class<?> proxyType, String baseUriFromAnnotation, String propertyPrefix) { this.proxyType = proxyType; this.baseUriFromAnnotation = baseUriFromAnnotation; this.config = ConfigProviderResolver.instance().getConfig(); this.propertyPrefix = propertyPrefix; } public Object create() { RestClientBuilder builder = RestClientBuilder.newBuilder(); configureBaseUrl(builder); configureTimeouts(builder); configureProviders(builder); configureSsl(builder); return builder.build(proxyType); } private void configureSsl(RestClientBuilder builder) { Optional<String> maybeTrustStore = getOptionalProperty(REST_TRUST_STORE, String.class); maybeTrustStore.ifPresent(trustStore -> registerTrustStore(trustStore, builder)); Optional<String> maybeKeyStore = getOptionalProperty(REST_KEY_STORE, String.class); maybeKeyStore.ifPresent(keyStore -> registerKeyStore(keyStore, builder)); Optional<String> maybeHostnameVerifier = getOptionalProperty(REST_HOSTNAME_VERIFIER, String.class); maybeHostnameVerifier.ifPresent(verifier -> registerHostnameVerifier(verifier, builder)); } private void registerHostnameVerifier(String verifier, RestClientBuilder builder) { try { Class<?> verifierClass = Class.forName(verifier, true, Thread.currentThread().getContextClassLoader()); builder.hostnameVerifier((HostnameVerifier) verifierClass.newInstance()); } catch (ClassNotFoundException e) { throw new RuntimeException("Could not find hostname verifier class" + verifier, e); } catch (InstantiationException | IllegalAccessException e) { throw new RuntimeException( "Failed to instantiate hostname verifier class. 
Make sure it has a public, no-argument constructor", e); } catch (ClassCastException e) { throw new RuntimeException("The provided hostname verifier " + verifier + " is not an instance of HostnameVerifier", e); } } private void registerKeyStore(String keyStorePath, RestClientBuilder builder) { Optional<String> keyStorePassword = getOptionalProperty(REST_KEY_STORE_PASSWORD, String.class); Optional<String> keyStoreType = getOptionalProperty(REST_KEY_STORE_TYPE, String.class); try { KeyStore keyStore = KeyStore.getInstance(keyStoreType.orElse("JKS")); String password = keyStorePassword .orElseThrow(() -> new IllegalArgumentException("No password provided for keystore")); try (InputStream input = locateStream(keyStorePath)) { keyStore.load(input, password.toCharArray()); } catch (IOException | CertificateException | NoSuchAlgorithmException e) { throw new IllegalArgumentException("Failed to initialize trust store from classpath resource " + keyStorePath, e); } builder.keyStore(keyStore, password); } catch (KeyStoreException e) { throw new IllegalArgumentException("Failed to initialize trust store from " + keyStorePath, e); } } private void registerTrustStore(String trustStorePath, RestClientBuilder builder) { Optional<String> maybeTrustStorePassword = getOptionalProperty(REST_TRUST_STORE_PASSWORD, String.class); Optional<String> maybeTrustStoreType = getOptionalProperty(REST_TRUST_STORE_TYPE, String.class); try { KeyStore trustStore = KeyStore.getInstance(maybeTrustStoreType.orElse("JKS")); String password = maybeTrustStorePassword .orElseThrow(() -> new IllegalArgumentException("No password provided for truststore")); try (InputStream input = locateStream(trustStorePath)) { trustStore.load(input, password.toCharArray()); } catch (IOException | CertificateException | NoSuchAlgorithmException e) { throw new IllegalArgumentException("Failed to initialize trust store from classpath resource " + trustStorePath, e); } builder.trustStore(trustStore); } catch (KeyStoreException e) { throw new IllegalArgumentException("Failed to initialize trust store from " + trustStorePath, e); } } private InputStream locateStream(String path) throws FileNotFoundException { if (path.startsWith("classpath:")) { path = path.replaceFirst("classpath:", ""); InputStream resultStream = Thread.currentThread().getContextClassLoader().getResourceAsStream(path); if (resultStream == null) { resultStream = getClass().getResourceAsStream(path); } if (resultStream == null) { throw new IllegalArgumentException( "Classpath resource " + path + " not found for MicroProfile Rest Client SSL configuration"); } return resultStream; } else { if (path.startsWith("file:")) { path = path.replaceFirst("file:", ""); } File certificateFile = new File(path); if (!certificateFile.isFile()) { throw new IllegalArgumentException( "Certificate file: " + path + " not found for MicroProfile Rest Client SSL configuration"); } return new FileInputStream(certificateFile); } } private void configureProviders(RestClientBuilder builder) { Optional<String> maybeProviders = getOptionalProperty(REST_PROVIDERS, String.class); maybeProviders.ifPresent(providers -> registerProviders(builder, providers)); } private void registerProviders(RestClientBuilder builder, String providersAsString) { Stream.of(providersAsString.split(",")) .map(String::trim) .map(this::providerClassForName) .forEach(builder::register); } private Class<?> providerClassForName(String name) { try { return Class.forName(name, true, Thread.currentThread().getContextClassLoader()); } catch 
(ClassNotFoundException e) { throw new RuntimeException("Could not find provider class: " + name); } } private void configureTimeouts(RestClientBuilder builder) { Optional<Long> connectTimeout = getOptionalProperty(REST_CONNECT_TIMEOUT_FORMAT, Long.class); connectTimeout.ifPresent(timeout -> builder.connectTimeout(timeout, TimeUnit.MILLISECONDS)); Optional<Long> readTimeout = getOptionalProperty(REST_READ_TIMEOUT_FORMAT, Long.class); readTimeout.ifPresent(timeout -> builder.readTimeout(timeout, TimeUnit.MILLISECONDS)); } private <T> Optional<T> getOptionalProperty(String propertyFormat, Class<T> type) { Optional<T> interfaceNameValue = config.getOptionalValue(String.format(propertyFormat, proxyType.getName()), type); return interfaceNameValue.isPresent() ? interfaceNameValue : config.getOptionalValue(String.format(propertyFormat, propertyPrefix), type); } }
class RestClientBase { public static final String MP_REST = "mp-rest"; public static final String REST_URL_FORMAT = "%s/" + MP_REST + "/url"; public static final String REST_URI_FORMAT = "%s/" + MP_REST + "/uri"; public static final String REST_CONNECT_TIMEOUT_FORMAT = "%s/" + MP_REST + "/connectTimeout"; public static final String REST_READ_TIMEOUT_FORMAT = "%s/" + MP_REST + "/readTimeout"; public static final String REST_SCOPE_FORMAT = "%s/" + MP_REST + "/scope"; public static final String REST_PROVIDERS = "%s/" + MP_REST + "/providers"; public static final String REST_TRUST_STORE = "%s/" + MP_REST + "/trustStore"; public static final String REST_TRUST_STORE_PASSWORD = "%s/" + MP_REST + "/trustStorePassword"; public static final String REST_TRUST_STORE_TYPE = "%s/" + MP_REST + "/trustStoreType"; public static final String REST_KEY_STORE = "%s/" + MP_REST + "/keyStore"; public static final String REST_KEY_STORE_PASSWORD = "%s/" + MP_REST + "/keyStorePassword"; public static final String REST_KEY_STORE_TYPE = "%s/" + MP_REST + "/keyStoreType"; public static final String REST_HOSTNAME_VERIFIER = "%s/" + MP_REST + "/hostnameVerifier"; private final Class<?> proxyType; private final String baseUriFromAnnotation; private final String propertyPrefix; private final Config config; public RestClientBase(Class<?> proxyType, String baseUriFromAnnotation, String propertyPrefix) { this.proxyType = proxyType; this.baseUriFromAnnotation = baseUriFromAnnotation; this.config = ConfigProviderResolver.instance().getConfig(); this.propertyPrefix = propertyPrefix; } public Object create() { RestClientBuilder builder = RestClientBuilder.newBuilder(); configureBaseUrl(builder); configureTimeouts(builder); configureProviders(builder); configureSsl(builder); return builder.build(proxyType); } private void configureSsl(RestClientBuilder builder) { Optional<String> maybeTrustStore = getOptionalProperty(REST_TRUST_STORE, String.class); maybeTrustStore.ifPresent(trustStore -> registerTrustStore(trustStore, builder)); Optional<String> maybeKeyStore = getOptionalProperty(REST_KEY_STORE, String.class); maybeKeyStore.ifPresent(keyStore -> registerKeyStore(keyStore, builder)); Optional<String> maybeHostnameVerifier = getOptionalProperty(REST_HOSTNAME_VERIFIER, String.class); maybeHostnameVerifier.ifPresent(verifier -> registerHostnameVerifier(verifier, builder)); } private void registerHostnameVerifier(String verifier, RestClientBuilder builder) { try { Class<?> verifierClass = Class.forName(verifier, true, Thread.currentThread().getContextClassLoader()); builder.hostnameVerifier((HostnameVerifier) verifierClass.newInstance()); } catch (ClassNotFoundException e) { throw new RuntimeException("Could not find hostname verifier class" + verifier, e); } catch (InstantiationException | IllegalAccessException e) { throw new RuntimeException( "Failed to instantiate hostname verifier class. 
Make sure it has a public, no-argument constructor", e); } catch (ClassCastException e) { throw new RuntimeException("The provided hostname verifier " + verifier + " is not an instance of HostnameVerifier", e); } } private void registerKeyStore(String keyStorePath, RestClientBuilder builder) { Optional<String> keyStorePassword = getOptionalProperty(REST_KEY_STORE_PASSWORD, String.class); Optional<String> keyStoreType = getOptionalProperty(REST_KEY_STORE_TYPE, String.class); try { KeyStore keyStore = KeyStore.getInstance(keyStoreType.orElse("JKS")); String password = keyStorePassword .orElseThrow(() -> new IllegalArgumentException("No password provided for keystore")); try (InputStream input = locateStream(keyStorePath)) { keyStore.load(input, password.toCharArray()); } catch (IOException | CertificateException | NoSuchAlgorithmException e) { throw new IllegalArgumentException("Failed to initialize trust store from classpath resource " + keyStorePath, e); } builder.keyStore(keyStore, password); } catch (KeyStoreException e) { throw new IllegalArgumentException("Failed to initialize trust store from " + keyStorePath, e); } } private void registerTrustStore(String trustStorePath, RestClientBuilder builder) { Optional<String> maybeTrustStorePassword = getOptionalProperty(REST_TRUST_STORE_PASSWORD, String.class); Optional<String> maybeTrustStoreType = getOptionalProperty(REST_TRUST_STORE_TYPE, String.class); try { KeyStore trustStore = KeyStore.getInstance(maybeTrustStoreType.orElse("JKS")); String password = maybeTrustStorePassword .orElseThrow(() -> new IllegalArgumentException("No password provided for truststore")); try (InputStream input = locateStream(trustStorePath)) { trustStore.load(input, password.toCharArray()); } catch (IOException | CertificateException | NoSuchAlgorithmException e) { throw new IllegalArgumentException("Failed to initialize trust store from classpath resource " + trustStorePath, e); } builder.trustStore(trustStore); } catch (KeyStoreException e) { throw new IllegalArgumentException("Failed to initialize trust store from " + trustStorePath, e); } } private InputStream locateStream(String path) throws FileNotFoundException { if (path.startsWith("classpath:")) { path = path.replaceFirst("classpath:", ""); InputStream resultStream = Thread.currentThread().getContextClassLoader().getResourceAsStream(path); if (resultStream == null) { resultStream = getClass().getResourceAsStream(path); } if (resultStream == null) { throw new IllegalArgumentException( "Classpath resource " + path + " not found for MicroProfile Rest Client SSL configuration"); } return resultStream; } else { if (path.startsWith("file:")) { path = path.replaceFirst("file:", ""); } File certificateFile = new File(path); if (!certificateFile.isFile()) { throw new IllegalArgumentException( "Certificate file: " + path + " not found for MicroProfile Rest Client SSL configuration"); } return new FileInputStream(certificateFile); } } private void configureProviders(RestClientBuilder builder) { Optional<String> maybeProviders = getOptionalProperty(REST_PROVIDERS, String.class); maybeProviders.ifPresent(providers -> registerProviders(builder, providers)); } private void registerProviders(RestClientBuilder builder, String providersAsString) { Stream.of(providersAsString.split(",")) .map(String::trim) .map(this::providerClassForName) .forEach(builder::register); } private Class<?> providerClassForName(String name) { try { return Class.forName(name, true, Thread.currentThread().getContextClassLoader()); } catch 
(ClassNotFoundException e) { throw new RuntimeException("Could not find provider class: " + name); } } private void configureTimeouts(RestClientBuilder builder) { Optional<Long> connectTimeout = getOptionalProperty(REST_CONNECT_TIMEOUT_FORMAT, Long.class); connectTimeout.ifPresent(timeout -> builder.connectTimeout(timeout, TimeUnit.MILLISECONDS)); Optional<Long> readTimeout = getOptionalProperty(REST_READ_TIMEOUT_FORMAT, Long.class); readTimeout.ifPresent(timeout -> builder.readTimeout(timeout, TimeUnit.MILLISECONDS)); } private <T> Optional<T> getOptionalProperty(String propertyFormat, Class<T> type) { Optional<T> interfaceNameValue = config.getOptionalValue(String.format(propertyFormat, proxyType.getName()), type); return interfaceNameValue.isPresent() ? interfaceNameValue : config.getOptionalValue(String.format(propertyFormat, propertyPrefix), type); } }
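The record above fixes a missing space in a nested `String.format` message. A minimal, standalone sketch of that pattern follows (the `my-client` configKey and the demo class name are hypothetical, not part of the Quarkus source); it shows how the per-client property keys are themselves formatted and then spliced into the outer error message, which is why the space before "to your Quarkus configuration" matters.

```java
// Hypothetical standalone demo of the nested String.format pattern above.
public class BaseUrlMessageDemo {
    static final String MP_REST = "mp-rest";
    static final String REST_URL_FORMAT = "%s/" + MP_REST + "/url";
    static final String REST_URI_FORMAT = "%s/" + MP_REST + "/uri";

    public static void main(String[] args) {
        String propertyPrefix = "my-client"; // hypothetical configKey
        String message = String.format(
                "Unable to determine the proper baseUrl/baseUri. "
                        + "Consider registering using @RegisterRestClient(baseUri=\"someuri\"), "
                        + "@RegisterRestClient(configKey=\"orkey\"), "
                        + "or by adding '%s' or '%s' to your Quarkus configuration",
                String.format(REST_URL_FORMAT, propertyPrefix),
                String.format(REST_URI_FORMAT, propertyPrefix));
        // -> ... adding 'my-client/mp-rest/url' or 'my-client/mp-rest/uri' to your Quarkus configuration
        System.out.println(message);
    }
}
```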
What should I do instead?
private Table toCalciteTable(String tableName, Entry entry) { if (entry.getSchema().getColumnsCount() == 0) { throw new UnsupportedOperationException( "Entry doesn't have a schema. Please attach a schema to '" + tableName + "' in Data Catalog: " + entry.toString()); } Schema schema = SchemaUtils.fromDataCatalog(entry.getSchema()); Optional<Table.Builder> tableBuilder = tableFactory.tableBuilder(entry); if (tableBuilder.isPresent()) { return tableBuilder.get().schema(schema).name(tableName).build(); } else { throw new UnsupportedOperationException( String.format( "Unsupported Data Catalog entry: %s", MoreObjects.toStringHelper(entry) .add("linkedResource", entry.getLinkedResource()) .add("hasGcsFilesetSpec", entry.hasGcsFilesetSpec()) .toString())); } }
return tableBuilder.get().schema(schema).name(tableName).build();
private Table toCalciteTable(String tableName, Entry entry) { if (entry.getSchema().getColumnsCount() == 0) { throw new UnsupportedOperationException( "Entry doesn't have a schema. Please attach a schema to '" + tableName + "' in Data Catalog: " + entry.toString()); } Schema schema = SchemaUtils.fromDataCatalog(entry.getSchema()); Optional<Table.Builder> tableBuilder = tableFactory.tableBuilder(entry); if (!tableBuilder.isPresent()) { throw new UnsupportedOperationException( String.format( "Unsupported Data Catalog entry: %s", MoreObjects.toStringHelper(entry) .add("linkedResource", entry.getLinkedResource()) .add("hasGcsFilesetSpec", entry.hasGcsFilesetSpec()) .toString())); } return tableBuilder.get().schema(schema).name(tableName).build(); }
class DataCatalogTableProvider extends FullNameTableProvider { private static final TableFactory PUBSUB_TABLE_FACTORY = new PubsubTableFactory(); private static final TableFactory GCS_TABLE_FACTORY = new GcsTableFactory(); private final Map<String, TableProvider> delegateProviders; private final DataCatalogBlockingStub dataCatalog; private final Map<String, Table> tableCache; private final TableFactory tableFactory; private DataCatalogTableProvider( Map<String, TableProvider> delegateProviders, DataCatalogBlockingStub dataCatalog, boolean truncateTimestamps) { this.tableCache = new HashMap<>(); this.delegateProviders = ImmutableMap.copyOf(delegateProviders); this.dataCatalog = dataCatalog; this.tableFactory = ChainedTableFactory.of( PUBSUB_TABLE_FACTORY, GCS_TABLE_FACTORY, new BigQueryTableFactory(truncateTimestamps)); } public static DataCatalogTableProvider create(DataCatalogPipelineOptions options) { return new DataCatalogTableProvider( getSupportedProviders(), createDataCatalogClient(options), options.getTruncateTimestamps()); } @Override public String getTableType() { return "google.cloud.datacatalog"; } @Override public void createTable(Table table) { throw new UnsupportedOperationException( "Creating tables is not supported with DataCatalog table provider."); } @Override public void dropTable(String tableName) { throw new UnsupportedOperationException( "Dropping tables is not supported with DataCatalog table provider"); } @Override public Map<String, Table> getTables() { throw new UnsupportedOperationException("Loading all tables from DataCatalog is not supported"); } @Override public @Nullable Table getTable(String tableName) { return loadTable(tableName); } @Override public @Nullable Table getTableByFullName(TableName fullTableName) { ImmutableList<String> allNameParts = ImmutableList.<String>builder() .addAll(fullTableName.getPath()) .add(fullTableName.getTableName()) .build(); String fullEscapedTableName = ZetaSqlIdUtils.escapeAndJoin(allNameParts); return loadTable(fullEscapedTableName); } @Override public BeamSqlTable buildBeamSqlTable(Table table) { return delegateProviders.get(table.getType()).buildBeamSqlTable(table); } private @Nullable Table loadTable(String tableName) { if (!tableCache.containsKey(tableName)) { tableCache.put(tableName, loadTableFromDC(tableName)); } return tableCache.get(tableName); } private Table loadTableFromDC(String tableName) { try { return toCalciteTable( tableName, dataCatalog.lookupEntry( LookupEntryRequest.newBuilder().setSqlResource(tableName).build())); } catch (StatusRuntimeException e) { if (e.getStatus().equals(Status.INVALID_ARGUMENT)) { return null; } throw new RuntimeException(e); } } private static DataCatalogBlockingStub createDataCatalogClient( DataCatalogPipelineOptions options) { return DataCatalogGrpc.newBlockingStub( ManagedChannelBuilder.forTarget(options.getDataCatalogEndpoint()).build()) .withCallCredentials( MoreCallCredentials.from(options.as(GcpOptions.class).getGcpCredential())); } private static Map<String, TableProvider> getSupportedProviders() { return Stream.of( new PubsubJsonTableProvider(), new BigQueryTableProvider(), new TextTableProvider()) .collect(toMap(TableProvider::getTableType, p -> p)); } }
class DataCatalogTableProvider extends FullNameTableProvider { private static final TableFactory PUBSUB_TABLE_FACTORY = new PubsubTableFactory(); private static final TableFactory GCS_TABLE_FACTORY = new GcsTableFactory(); private static final Map<String, TableProvider> DELEGATE_PROVIDERS = Stream.of(new PubsubJsonTableProvider(), new BigQueryTableProvider(), new TextTableProvider()) .collect(toMap(TableProvider::getTableType, p -> p)); private final DataCatalogBlockingStub dataCatalog; private final Map<String, Table> tableCache; private final TableFactory tableFactory; private DataCatalogTableProvider( DataCatalogBlockingStub dataCatalog, boolean truncateTimestamps) { this.tableCache = new HashMap<>(); this.dataCatalog = dataCatalog; this.tableFactory = ChainedTableFactory.of( PUBSUB_TABLE_FACTORY, GCS_TABLE_FACTORY, new BigQueryTableFactory(truncateTimestamps)); } public static DataCatalogTableProvider create(DataCatalogPipelineOptions options) { return new DataCatalogTableProvider( createDataCatalogClient(options), options.getTruncateTimestamps()); } @Override public String getTableType() { return "google.cloud.datacatalog"; } @Override public void createTable(Table table) { throw new UnsupportedOperationException( "Creating tables is not supported with DataCatalog table provider."); } @Override public void dropTable(String tableName) { throw new UnsupportedOperationException( "Dropping tables is not supported with DataCatalog table provider"); } @Override public Map<String, Table> getTables() { throw new UnsupportedOperationException("Loading all tables from DataCatalog is not supported"); } @Override public @Nullable Table getTable(String tableName) { return loadTable(tableName); } @Override public @Nullable Table getTableByFullName(TableName fullTableName) { ImmutableList<String> allNameParts = ImmutableList.<String>builder() .addAll(fullTableName.getPath()) .add(fullTableName.getTableName()) .build(); String fullEscapedTableName = ZetaSqlIdUtils.escapeAndJoin(allNameParts); return loadTable(fullEscapedTableName); } @Override public BeamSqlTable buildBeamSqlTable(Table table) { return DELEGATE_PROVIDERS.get(table.getType()).buildBeamSqlTable(table); } private @Nullable Table loadTable(String tableName) { if (!tableCache.containsKey(tableName)) { tableCache.put(tableName, loadTableFromDC(tableName)); } return tableCache.get(tableName); } private Table loadTableFromDC(String tableName) { try { return toCalciteTable( tableName, dataCatalog.lookupEntry( LookupEntryRequest.newBuilder().setSqlResource(tableName).build())); } catch (StatusRuntimeException e) { if (e.getStatus().equals(Status.INVALID_ARGUMENT)) { return null; } throw new RuntimeException(e); } } private static DataCatalogBlockingStub createDataCatalogClient( DataCatalogPipelineOptions options) { return DataCatalogGrpc.newBlockingStub( ManagedChannelBuilder.forTarget(options.getDataCatalogEndpoint()).build()) .withCallCredentials( MoreCallCredentials.from(options.as(GcpOptions.class).getGcpCredential())); } }
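The answer to "What should I do instead?" is visible in the after-version of this record: invert the condition into a guard clause that throws early, leaving the happy path unindented at the end of the method. A minimal sketch of that refactor, with illustrative names rather than the real Beam types:

```java
// Guard-clause refactor sketch (names are illustrative, not the Beam classes).
import java.util.Optional;

public class GuardClauseDemo {
    static String build(Optional<String> tableBuilder) {
        // Before: if (present) { return ...; } else { throw ...; }
        // After: fail fast, then the success path reads straight down.
        if (!tableBuilder.isPresent()) {
            throw new UnsupportedOperationException("Unsupported Data Catalog entry");
        }
        return tableBuilder.get() + ":built";
    }

    public static void main(String[] args) {
        System.out.println(build(Optional.of("table"))); // table:built
    }
}
```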
I just added `CallCountOutputStream` to verify the proper number of write calls.
public void testWrite() throws IOException { ByteArrayOutputStream expected = new ByteArrayOutputStream(); ByteArrayOutputStream actual = new ByteArrayOutputStream(); UnownedOutputStream osActual = new UnownedOutputStream(actual); byte[] data0 = "Hello World!".getBytes(StandardCharsets.UTF_8); byte[] data1 = "Welcome!".getBytes(StandardCharsets.UTF_8); expected.write(data0, 0, data0.length); osActual.write(data0, 0, data0.length); expected.write(data1, 0, data1.length); osActual.write(data1, 0, data1.length); assertArrayEquals(expected.toByteArray(), actual.toByteArray()); }
assertArrayEquals(expected.toByteArray(), actual.toByteArray());
public void testWrite() throws IOException { CallCountOutputStream fsCount = new CallCountOutputStream(); FilterOutputStream fs = new FilterOutputStream(fsCount); CallCountOutputStream osCount = new CallCountOutputStream(); UnownedOutputStream os = new UnownedOutputStream(osCount); byte[] data = "Hello World!".getBytes(StandardCharsets.UTF_8); fs.write(data, 0, data.length); os.write(data, 0, data.length); fs.write('\n'); os.write('\n'); assertEquals(13, fsCount.callCnt); assertEquals(2, osCount.callCnt); assertArrayEquals(fsCount.toByteArray(), osCount.toByteArray()); }
class UnownedOutputStreamTest { @Rule public ExpectedException expectedException = ExpectedException.none(); private ByteArrayOutputStream baos; private UnownedOutputStream os; @Before public void setup() { baos = new ByteArrayOutputStream(); os = new UnownedOutputStream(baos); } @Test public void testHashCodeEqualsAndToString() throws Exception { assertEquals(baos.hashCode(), os.hashCode()); assertEquals("UnownedOutputStream{out=" + baos + "}", os.toString()); assertEquals(new UnownedOutputStream(baos), os); } @Test public void testClosingThrows() throws Exception { expectedException.expect(UnsupportedOperationException.class); expectedException.expectMessage("Caller does not own the underlying"); os.close(); } @Test }
class UnownedOutputStreamTest { @Rule public ExpectedException expectedException = ExpectedException.none(); private ByteArrayOutputStream baos; private UnownedOutputStream os; @Before public void setup() { baos = new ByteArrayOutputStream(); os = new UnownedOutputStream(baos); } @Test public void testHashCodeEqualsAndToString() throws Exception { assertEquals(baos.hashCode(), os.hashCode()); assertEquals("UnownedOutputStream{out=" + baos + "}", os.toString()); assertEquals(new UnownedOutputStream(baos), os); } @Test public void testClosingThrows() throws Exception { expectedException.expect(UnsupportedOperationException.class); expectedException.expectMessage("Caller does not own the underlying"); os.close(); } @Test private static final class CallCountOutputStream extends ByteArrayOutputStream { int callCnt; @Override public synchronized void write(int b) { callCnt++; super.write(b); } @Override public synchronized void write(byte[] b, int off, int len) { callCnt++; super.write(b, off, len); } } }
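A minimal standalone sketch of the counting-decorator idea in this record: a `ByteArrayOutputStream` subclass counts every `write(...)` call it receives, which makes call-forwarding behavior observable. In the test above, `java.io.FilterOutputStream` degrades a 12-byte bulk write into 12 single-byte calls (13 with the trailing newline), while `UnownedOutputStream` forwards the bulk write as one call (2 in total).

```java
// Standalone sketch of CallCountOutputStream; only the demo main is new.
import java.io.ByteArrayOutputStream;
import java.nio.charset.StandardCharsets;

public class CallCountDemo {
    static final class CallCountOutputStream extends ByteArrayOutputStream {
        int callCnt;

        @Override
        public synchronized void write(int b) {
            callCnt++;
            super.write(b);
        }

        @Override
        public synchronized void write(byte[] b, int off, int len) {
            callCnt++;
            super.write(b, off, len);
        }
    }

    public static void main(String[] args) {
        CallCountOutputStream out = new CallCountOutputStream();
        byte[] data = "Hello World!".getBytes(StandardCharsets.UTF_8);
        out.write(data, 0, data.length); // one bulk call
        out.write('\n');                 // one single-byte call
        System.out.println(out.callCnt); // 2
    }
}
```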
I'm not sure it would be good to emit info logs for split requests. These are per subtask and there could be hundreds at a time depending on parallelism.
public void handleSplitRequest(int subtaskId, String requesterHostname) { LOG.debug( "handleSplitRequest subtask={} sourceIndex={} pendingSplits={}", subtaskId, currentSourceIndex, pendingSplits); assignPendingSplits(subtaskId); currentEnumerator.handleSplitRequest(subtaskId, requesterHostname); }
pendingSplits);
public void handleSplitRequest(int subtaskId, String requesterHostname) { LOG.debug( "handleSplitRequest subtask={} sourceIndex={} pendingSplits={}", subtaskId, currentSourceIndex, pendingSplits); Preconditions.checkState(pendingSplits.isEmpty() || !pendingSplits.containsKey(subtaskId)); currentEnumerator.handleSplitRequest(subtaskId, requesterHostname); }
class HybridSourceSplitEnumerator<SplitT extends SourceSplit> implements SplitEnumerator<HybridSourceSplit<SplitT>, HybridSourceEnumeratorState> { private static final Logger LOG = LoggerFactory.getLogger(HybridSourceSplitEnumerator.class); private final SplitEnumeratorContext<HybridSourceSplit> context; private final HybridSource.SourceChain<?, SplitT, Object> sourceChain; private final Map<Integer, List<HybridSourceSplit<SplitT>>> assignments; private final Map<Integer, TreeMap<Integer, List<HybridSourceSplit<SplitT>>>> pendingSplits; private final HashSet<Integer> pendingReaders; private int currentSourceIndex; private SplitEnumerator<SplitT, Object> currentEnumerator; public HybridSourceSplitEnumerator( SplitEnumeratorContext<HybridSourceSplit> context, HybridSource.SourceChain<?, SplitT, Object> sourceChain) { this(context, sourceChain, 0); } public HybridSourceSplitEnumerator( SplitEnumeratorContext<HybridSourceSplit> context, HybridSource.SourceChain<?, SplitT, Object> sourceChain, int initialSourceIndex) { Preconditions.checkArgument(initialSourceIndex < sourceChain.sources.size()); this.context = context; this.sourceChain = sourceChain; this.currentSourceIndex = initialSourceIndex; this.assignments = new HashMap<>(); this.pendingSplits = new HashMap<>(); this.pendingReaders = new HashSet<>(); } @Override public void start() { switchEnumerator(); } @Override @Override public void addSplitsBack(List<HybridSourceSplit<SplitT>> splits, int subtaskId) { LOG.debug("Adding splits back for subtask={} {}", subtaskId, splits); TreeMap<Integer, List<HybridSourceSplit<SplitT>>> splitsBySourceIndex = new TreeMap<>(); for (HybridSourceSplit<SplitT> split : splits) { splitsBySourceIndex .computeIfAbsent(split.sourceIndex(), k -> new ArrayList<>()) .add(split); } splitsBySourceIndex.forEach( (k, splitsPerSource) -> { if (k == currentSourceIndex) { currentEnumerator.addSplitsBack( HybridSourceReader.unwrapSplits(splitsPerSource), subtaskId); } else { pendingSplits .computeIfAbsent(subtaskId, sourceIndex -> new TreeMap<>()) .put(k, splitsPerSource); if (context.registeredReaders().containsKey(subtaskId)) { assignPendingSplits(subtaskId); } } }); } @Override public void addReader(int subtaskId) { LOG.debug("addReader subtaskId={}", subtaskId); if (pendingSplits.isEmpty()) { context.sendEventToSourceReader(subtaskId, new SwitchSourceEvent(currentSourceIndex)); LOG.debug("Adding reader {} to enumerator {}", subtaskId, currentSourceIndex); currentEnumerator.addReader(subtaskId); } else { pendingReaders.add(subtaskId); assignPendingSplits(subtaskId); } } private void assignPendingSplits(int subtaskId) { TreeMap<Integer, List<HybridSourceSplit<SplitT>>> splitsBySource = pendingSplits.get(subtaskId); if (splitsBySource != null) { int sourceIndex = splitsBySource.firstKey(); List<HybridSourceSplit<SplitT>> splits = Preconditions.checkNotNull(splitsBySource.get(sourceIndex)); Preconditions.checkState(!splits.isEmpty()); LOG.debug("Assigning pending splits subtask={} {}", subtaskId, splits); context.sendEventToSourceReader(subtaskId, new SwitchSourceEvent(sourceIndex)); context.assignSplits( new SplitsAssignment<HybridSourceSplit>( Collections.singletonMap(subtaskId, (List) splits))); context.signalNoMoreSplits(subtaskId); splits.clear(); } } @Override public HybridSourceEnumeratorState snapshotState(long checkpointId) throws Exception { Object enumState = currentEnumerator.snapshotState(checkpointId); return new HybridSourceEnumeratorState(currentSourceIndex, enumState); } @Override public void 
handleSourceEvent(int subtaskId, SourceEvent sourceEvent) { if (sourceEvent instanceof SourceReaderFinishedEvent) { SourceReaderFinishedEvent srfe = (SourceReaderFinishedEvent) sourceEvent; if (srfe.sourceIndex() != currentSourceIndex) { if (srfe.sourceIndex() < currentSourceIndex) { TreeMap<Integer, List<HybridSourceSplit<SplitT>>> splitsBySource = pendingSplits.get(subtaskId); if (splitsBySource != null) { List<HybridSourceSplit<SplitT>> splits = splitsBySource.get(srfe.sourceIndex()); if (splits != null && splits.isEmpty()) { splitsBySource.remove(srfe.sourceIndex()); } if (splitsBySource.isEmpty()) { pendingSplits.remove(subtaskId); } else { Integer nextSubtaskSourceIndex = splitsBySource.firstKey(); LOG.debug( "Restore subtask={}, sourceIndex={}", subtaskId, nextSubtaskSourceIndex); context.sendEventToSourceReader( subtaskId, new SwitchSourceEvent(nextSubtaskSourceIndex)); assignPendingSplits(subtaskId); } } if (!pendingReaders.isEmpty() && pendingSplits.isEmpty()) { LOG.debug( "Adding pending readers {} to enumerator currentSourceIndex={}", pendingReaders, currentSourceIndex); for (int pendingReaderSubtaskId : pendingReaders) { context.sendEventToSourceReader( pendingReaderSubtaskId, new SwitchSourceEvent(currentSourceIndex)); } for (int pendingReaderSubtaskId : pendingReaders) { currentEnumerator.addReader(pendingReaderSubtaskId); } pendingReaders.clear(); } } else { LOG.debug("Ignoring out of order event {}", srfe); } return; } this.assignments.remove(subtaskId); LOG.info( "Reader finished for subtask {} remaining assignments {}", subtaskId, assignments); if (this.assignments.isEmpty()) { LOG.debug("No assignments remaining, ready to switch readers!"); if (currentSourceIndex + 1 < sourceChain.sources.size()) { switchEnumerator(); for (int i = 0; i < context.currentParallelism(); i++) { context.sendEventToSourceReader( i, new SwitchSourceEvent(currentSourceIndex)); } for (int i = 0; i < context.currentParallelism(); i++) { LOG.debug("adding reader subtask={} sourceIndex={}", i, currentSourceIndex); currentEnumerator.addReader(i); } } } } else { currentEnumerator.handleSourceEvent(subtaskId, sourceEvent); } } @Override public void close() throws IOException { currentEnumerator.close(); } private void switchEnumerator() { Object enumeratorState = null; if (currentEnumerator != null) { try { enumeratorState = currentEnumerator.snapshotState(-1); currentEnumerator.close(); } catch (Exception e) { throw new RuntimeException(e); } currentEnumerator = null; currentSourceIndex++; } SplitEnumeratorContextProxy delegatingContext = new SplitEnumeratorContextProxy(currentSourceIndex, context, assignments); Source<?, ? extends SourceSplit, Object> source = (Source) sourceChain.sources.get(currentSourceIndex).f0; HybridSource.CheckpointConverter<Object, Object> converter = (HybridSource.CheckpointConverter) sourceChain.sources.get(currentSourceIndex).f1; try { if (converter != null) { currentEnumerator = source.restoreEnumerator( delegatingContext, converter.apply(enumeratorState)); } else { currentEnumerator = source.createEnumerator(delegatingContext); } } catch (Exception e) { throw new RuntimeException( "Failed to create enumerator for sourceIndex=" + currentSourceIndex, e); } LOG.info("Starting enumerator for sourceIndex={}", currentSourceIndex); currentEnumerator.start(); } /** * The {@link SplitEnumeratorContext} that is provided to the currently active enumerator. * * <p>This context is used to wrap the splits into {@link HybridSourceSplit} and track * assignment to readers. 
*/ private static class SplitEnumeratorContextProxy<SplitT extends SourceSplit> implements SplitEnumeratorContext<SplitT> { private static final Logger LOG = LoggerFactory.getLogger(SplitEnumeratorContextProxy.class); private final SplitEnumeratorContext<HybridSourceSplit<?>> realContext; private final int sourceIndex; private final Map<Integer, List<HybridSourceSplit<?>>> assignments; public SplitEnumeratorContextProxy( int sourceIndex, SplitEnumeratorContext<HybridSourceSplit<?>> realContext, Map<Integer, List<HybridSourceSplit<?>>> assignments) { this.realContext = realContext; this.sourceIndex = sourceIndex; this.assignments = assignments; } @Override public MetricGroup metricGroup() { return realContext.metricGroup(); } @Override public void sendEventToSourceReader(int subtaskId, SourceEvent event) { realContext.sendEventToSourceReader(subtaskId, event); } @Override public int currentParallelism() { return realContext.currentParallelism(); } @Override public Map<Integer, ReaderInfo> registeredReaders() { return realContext.registeredReaders(); } @Override public void assignSplits(SplitsAssignment<SplitT> newSplitAssignments) { Map<Integer, List<HybridSourceSplit<?>>> wrappedAssignmentMap = new HashMap<>(); for (Map.Entry<Integer, List<SplitT>> e : newSplitAssignments.assignment().entrySet()) { List<HybridSourceSplit<?>> splits = HybridSourceReader.wrapSplits(sourceIndex, e.getValue()); wrappedAssignmentMap.put(e.getKey(), splits); assignments.merge( e.getKey(), splits, (all, plus) -> { all.addAll(plus); return all; }); } SplitsAssignment<HybridSourceSplit<?>> wrappedAssignments = new SplitsAssignment<>(wrappedAssignmentMap); LOG.debug("Assigning splits sourceIndex={} {}", sourceIndex, wrappedAssignments); realContext.assignSplits(wrappedAssignments); } @Override public void assignSplit(SplitT split, int subtask) { HybridSourceSplit<SplitT> wrappedSplit = new HybridSourceSplit(sourceIndex, split); assignments.merge( subtask, new ArrayList<>(Arrays.asList(wrappedSplit)), (all, plus) -> { all.addAll(plus); return all; }); realContext.assignSplit(wrappedSplit, subtask); } @Override public void signalNoMoreSplits(int subtask) { realContext.signalNoMoreSplits(subtask); } @Override public <T> void callAsync(Callable<T> callable, BiConsumer<T, Throwable> handler) { realContext.callAsync(callable, handler); } @Override public <T> void callAsync( Callable<T> callable, BiConsumer<T, Throwable> handler, long initialDelay, long period) { realContext.callAsync(callable, handler, initialDelay, period); } @Override public void runInCoordinatorThread(Runnable runnable) { realContext.runInCoordinatorThread(runnable); } } }
class HybridSourceSplitEnumerator implements SplitEnumerator<HybridSourceSplit, HybridSourceEnumeratorState> { private static final Logger LOG = LoggerFactory.getLogger(HybridSourceSplitEnumerator.class); private final SplitEnumeratorContext<HybridSourceSplit> context; private final List<HybridSource.SourceListEntry> sources; private final Map<Integer, Source> switchedSources; private final Map<Integer, TreeMap<Integer, List<HybridSourceSplit>>> pendingSplits; private final Set<Integer> finishedReaders; private final Map<Integer, Integer> readerSourceIndex; private int currentSourceIndex; private Object restoredEnumeratorState; private SplitEnumerator<SourceSplit, Object> currentEnumerator; public HybridSourceSplitEnumerator( SplitEnumeratorContext<HybridSourceSplit> context, List<HybridSource.SourceListEntry> sources, int initialSourceIndex, Map<Integer, Source> switchedSources, Object restoredEnumeratorState) { Preconditions.checkArgument(initialSourceIndex < sources.size()); this.context = context; this.sources = sources; this.currentSourceIndex = initialSourceIndex; this.pendingSplits = new HashMap<>(); this.finishedReaders = new HashSet<>(); this.readerSourceIndex = new HashMap<>(); this.switchedSources = switchedSources; this.restoredEnumeratorState = restoredEnumeratorState; } @Override public void start() { switchEnumerator(); } @Override @Override public void addSplitsBack(List<HybridSourceSplit> splits, int subtaskId) { LOG.debug("Adding splits back for subtask={} splits={}", subtaskId, splits); TreeMap<Integer, List<HybridSourceSplit>> splitsBySourceIndex = new TreeMap<>(); for (HybridSourceSplit split : splits) { splitsBySourceIndex .computeIfAbsent(split.sourceIndex(), k -> new ArrayList<>()) .add(split); } splitsBySourceIndex.forEach( (k, splitsPerSource) -> { if (k == currentSourceIndex) { currentEnumerator.addSplitsBack( HybridSourceSplit.unwrapSplits(splitsPerSource), subtaskId); } else { pendingSplits .computeIfAbsent(subtaskId, sourceIndex -> new TreeMap<>()) .put(k, splitsPerSource); } }); } @Override public void addReader(int subtaskId) { LOG.debug("addReader subtaskId={}", subtaskId); readerSourceIndex.remove(subtaskId); } private void sendSwitchSourceEvent(int subtaskId, int sourceIndex) { readerSourceIndex.put(subtaskId, sourceIndex); Source source = Preconditions.checkNotNull(switchedSources.get(sourceIndex)); context.sendEventToSourceReader( subtaskId, new SwitchSourceEvent(sourceIndex, source, sourceIndex >= (sources.size() - 1))); TreeMap<Integer, List<HybridSourceSplit>> splitsBySource = pendingSplits.get(subtaskId); if (splitsBySource != null) { List<HybridSourceSplit> splits = splitsBySource.remove(sourceIndex); if (splits != null && !splits.isEmpty()) { LOG.debug("Restoring splits to subtask={} {}", subtaskId, splits); context.assignSplits( new SplitsAssignment<>(Collections.singletonMap(subtaskId, splits))); context.signalNoMoreSplits(subtaskId); } if (splitsBySource.isEmpty()) { pendingSplits.remove(subtaskId); } } if (sourceIndex == currentSourceIndex) { LOG.debug("adding reader subtask={} sourceIndex={}", subtaskId, currentSourceIndex); currentEnumerator.addReader(subtaskId); } } @Override public HybridSourceEnumeratorState snapshotState(long checkpointId) throws Exception { Object enumState = currentEnumerator.snapshotState(checkpointId); return new HybridSourceEnumeratorState(currentSourceIndex, enumState); } @Override public void handleSourceEvent(int subtaskId, SourceEvent sourceEvent) { LOG.debug( "handleSourceEvent {} subtask={} pendingSplits={}", 
sourceEvent, subtaskId, pendingSplits); if (sourceEvent instanceof SourceReaderFinishedEvent) { SourceReaderFinishedEvent srfe = (SourceReaderFinishedEvent) sourceEvent; int subtaskSourceIndex = readerSourceIndex.computeIfAbsent( subtaskId, k -> { LOG.debug( "New reader subtask={} sourceIndex={}", subtaskId, srfe.sourceIndex()); return srfe.sourceIndex(); }); if (srfe.sourceIndex() < subtaskSourceIndex) { return; } if (subtaskSourceIndex < currentSourceIndex) { subtaskSourceIndex++; sendSwitchSourceEvent(subtaskId, subtaskSourceIndex); return; } finishedReaders.add(subtaskId); if (finishedReaders.size() == context.currentParallelism()) { LOG.debug("All readers finished, ready to switch enumerator!"); if (currentSourceIndex + 1 < sources.size()) { switchEnumerator(); for (int i = 0; i < context.currentParallelism(); i++) { sendSwitchSourceEvent(i, currentSourceIndex); } } } } else { currentEnumerator.handleSourceEvent(subtaskId, sourceEvent); } } @Override public void close() throws IOException { currentEnumerator.close(); } private void switchEnumerator() { SplitEnumerator<SourceSplit, Object> previousEnumerator = currentEnumerator; if (currentEnumerator != null) { try { currentEnumerator.close(); } catch (Exception e) { throw new RuntimeException(e); } currentEnumerator = null; currentSourceIndex++; } HybridSource.SourceSwitchContext<?> switchContext = new HybridSource.SourceSwitchContext<Object>() { @Override public Object getPreviousEnumerator() { return previousEnumerator; } }; Source<?, ? extends SourceSplit, Object> source = switchedSources.computeIfAbsent( currentSourceIndex, k -> { return sources.get(currentSourceIndex).factory.create(switchContext); }); switchedSources.put(currentSourceIndex, source); SplitEnumeratorContextProxy delegatingContext = new SplitEnumeratorContextProxy(currentSourceIndex, context, readerSourceIndex); try { if (restoredEnumeratorState == null) { currentEnumerator = source.createEnumerator(delegatingContext); } else { LOG.info("Restoring enumerator for sourceIndex={}", currentSourceIndex); currentEnumerator = source.restoreEnumerator(delegatingContext, restoredEnumeratorState); restoredEnumeratorState = null; } } catch (Exception e) { throw new RuntimeException( "Failed to create enumerator for sourceIndex=" + currentSourceIndex, e); } LOG.info("Starting enumerator for sourceIndex={}", currentSourceIndex); currentEnumerator.start(); } /** * The {@link SplitEnumeratorContext} that is provided to the currently active enumerator. * * <p>This context is used to wrap the splits into {@link HybridSourceSplit} and track * assignment to readers. 
*/ private static class SplitEnumeratorContextProxy<SplitT extends SourceSplit> implements SplitEnumeratorContext<SplitT> { private static final Logger LOG = LoggerFactory.getLogger(SplitEnumeratorContextProxy.class); private final SplitEnumeratorContext<HybridSourceSplit> realContext; private final int sourceIndex; private final Map<Integer, Integer> readerSourceIndex; private SplitEnumeratorContextProxy( int sourceIndex, SplitEnumeratorContext<HybridSourceSplit> realContext, Map<Integer, Integer> readerSourceIndex) { this.realContext = realContext; this.sourceIndex = sourceIndex; this.readerSourceIndex = readerSourceIndex; } @Override public MetricGroup metricGroup() { return realContext.metricGroup(); } @Override public void sendEventToSourceReader(int subtaskId, SourceEvent event) { realContext.sendEventToSourceReader(subtaskId, event); } @Override public int currentParallelism() { return realContext.currentParallelism(); } @Override public Map<Integer, ReaderInfo> registeredReaders() { Map<Integer, ReaderInfo> readers = realContext.registeredReaders(); if (readers.size() != readerSourceIndex.size()) { return filterRegisteredReaders(readers); } Integer lastIndex = null; for (Integer sourceIndex : readerSourceIndex.values()) { if (lastIndex != null && lastIndex != sourceIndex) { return filterRegisteredReaders(readers); } lastIndex = sourceIndex; } return readers; } private Map<Integer, ReaderInfo> filterRegisteredReaders(Map<Integer, ReaderInfo> readers) { Map<Integer, ReaderInfo> readersForSource = new HashMap<>(readers.size()); for (Map.Entry<Integer, ReaderInfo> e : readers.entrySet()) { if (readerSourceIndex.get(e.getKey()) == (Integer) sourceIndex) { readersForSource.put(e.getKey(), e.getValue()); } } return readersForSource; } @Override public void assignSplits(SplitsAssignment<SplitT> newSplitAssignments) { Map<Integer, List<HybridSourceSplit>> wrappedAssignmentMap = new HashMap<>(); for (Map.Entry<Integer, List<SplitT>> e : newSplitAssignments.assignment().entrySet()) { List<HybridSourceSplit> splits = HybridSourceSplit.wrapSplits(sourceIndex, e.getValue()); wrappedAssignmentMap.put(e.getKey(), splits); } SplitsAssignment<HybridSourceSplit> wrappedAssignments = new SplitsAssignment<>(wrappedAssignmentMap); LOG.debug("Assigning splits sourceIndex={} {}", sourceIndex, wrappedAssignments); realContext.assignSplits(wrappedAssignments); } @Override public void assignSplit(SplitT split, int subtask) { HybridSourceSplit wrappedSplit = new HybridSourceSplit(sourceIndex, split); realContext.assignSplit(wrappedSplit, subtask); } @Override public void signalNoMoreSplits(int subtask) { realContext.signalNoMoreSplits(subtask); } @Override public <T> void callAsync(Callable<T> callable, BiConsumer<T, Throwable> handler) { realContext.callAsync(callable, handler); } @Override public <T> void callAsync( Callable<T> callable, BiConsumer<T, Throwable> handler, long initialDelay, long period) { realContext.callAsync(callable, handler, initialDelay, period); } @Override public void runInCoordinatorThread(Runnable runnable) { realContext.runInCoordinatorThread(runnable); } } }
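The reviewer's point here is about log volume: split requests arrive per subtask, potentially hundreds at a time depending on parallelism, so they stay at DEBUG, while rare lifecycle transitions such as switching sources remain at INFO. A small sketch of that convention with SLF4J (logger setup assumed; the class name is illustrative):

```java
// Illustrative SLF4J logging-level convention; assumes slf4j-api on the classpath.
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class EnumeratorLoggingDemo {
    private static final Logger LOG = LoggerFactory.getLogger(EnumeratorLoggingDemo.class);

    void handleSplitRequest(int subtaskId, String requesterHostname) {
        // High-frequency, per-subtask event: DEBUG, with parameterized arguments
        // so no message string is built when DEBUG is disabled.
        LOG.debug("handleSplitRequest subtask={} host={}", subtaskId, requesterHostname);
    }

    void switchSource(int sourceIndex) {
        // Rare, pipeline-level transition: INFO is appropriate here.
        LOG.info("Starting enumerator for sourceIndex={}", sourceIndex);
    }
}
```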
I'm not sure that would simplify anything in this case. Am I missing something, though?
protected static void assertEventRoutesEqual(EventRoute expected, String expectedId, EventRoute actual) { assertEquals(expectedId, actual.getId()); assertEquals(expected.getEndpointName(), actual.getEndpointName()); assertEquals(expected.getFilter(), actual.getFilter()); }
assertEquals(expectedId, actual.getId());
protected static void assertEventRoutesEqual(EventRoute expected, String expectedId, EventRoute actual) { assertEquals(expectedId, actual.getId()); assertEquals(expected.getEndpointName(), actual.getEndpointName()); assertEquals(expected.getFilter(), actual.getFilter()); }
class EventRoutesTestBase extends DigitalTwinsTestBase { private final ClientLogger logger = new ClientLogger(EventRoutesTestBase.class); static final String EVENT_ROUTE_ENDPOINT_NAME = "someEventHubEndpoint"; static final String FILTER = "$eventType = 'DigitalTwinTelemetryMessages' or $eventType = 'DigitalTwinLifecycleNotification'"; @Test public abstract void eventRouteLifecycleTest(HttpClient httpClient, DigitalTwinsServiceVersion serviceVersion); @Test public abstract void getEventRouteThrowsIfEventRouteDoesNotExist(HttpClient httpClient, DigitalTwinsServiceVersion serviceVersion); @Test public abstract void createEventRouteThrowsIfFilterIsMalformed(HttpClient httpClient, DigitalTwinsServiceVersion serviceVersion); @BeforeEach public void removeAllEventRoutes() { DigitalTwinsClient client = getDigitalTwinsClientBuilder(null, DigitalTwinsServiceVersion.getLatest()).buildClient(); PagedIterable<EventRoute> listedEventRoutes = client.listEventRoutes(); List<String> currentEventRouteIds = new ArrayList<>(); for (EventRoute listedEventRoute : listedEventRoutes) { currentEventRouteIds.add(listedEventRoute.getId()); } for (String eventRouteId : currentEventRouteIds) { logger.info("Deleting event route " + eventRouteId + " before running the next test"); client.deleteEventRoute(eventRouteId); } } }
class EventRoutesTestBase extends DigitalTwinsTestBase { private final ClientLogger logger = new ClientLogger(EventRoutesTestBase.class); static final String EVENT_ROUTE_ENDPOINT_NAME = "someEventHubEndpoint"; static final String FILTER = "$eventType = 'DigitalTwinTelemetryMessages' or $eventType = 'DigitalTwinLifecycleNotification'"; @Test public abstract void eventRouteLifecycleTest(HttpClient httpClient, DigitalTwinsServiceVersion serviceVersion); @Test public abstract void getEventRouteThrowsIfEventRouteDoesNotExist(HttpClient httpClient, DigitalTwinsServiceVersion serviceVersion); @Test public abstract void createEventRouteThrowsIfFilterIsMalformed(HttpClient httpClient, DigitalTwinsServiceVersion serviceVersion); @BeforeEach public void removeAllEventRoutes() { DigitalTwinsClient client = getDigitalTwinsClientBuilder(null, DigitalTwinsServiceVersion.getLatest()).buildClient(); PagedIterable<EventRoute> listedEventRoutes = client.listEventRoutes(); List<String> currentEventRouteIds = new ArrayList<>(); for (EventRoute listedEventRoute : listedEventRoutes) { currentEventRouteIds.add(listedEventRoute.getId()); } for (String eventRouteId : currentEventRouteIds) { logger.info("Deleting event route " + eventRouteId + " before running the next test"); client.deleteEventRoute(eventRouteId); } } }
`values::add` instead of `Consumer<Elements>`? It's your choice whether you still want to provide the `StreamObserverClientFactory` type instead of a lambda, as in the old code.
public void testOutboundObserver() { final Collection<BeamFnApi.Elements> values = new ArrayList<>(); BeamFnDataGrpcMultiplexer multiplexer = new BeamFnDataGrpcMultiplexer( DESCRIPTOR, new StreamObserverClientFactory<Elements, Elements>() { @Override public StreamObserver<Elements> outboundObserverFor( StreamObserver<Elements> inboundObserver) { return TestStreams.withOnNext( new Consumer<Elements>() { @Override public void accept(Elements item) { values.add(item); } }) .build(); } }); multiplexer.getOutboundObserver().onNext(ELEMENTS); assertThat(values, contains(ELEMENTS)); }
return TestStreams.withOnNext(
public void testOutboundObserver() { final Collection<BeamFnApi.Elements> values = new ArrayList<>(); BeamFnDataGrpcMultiplexer multiplexer = new BeamFnDataGrpcMultiplexer( DESCRIPTOR, inboundObserver -> TestStreams.withOnNext(values::add).build()); multiplexer.getOutboundObserver().onNext(ELEMENTS); assertThat(values, contains(ELEMENTS)); }
class BeamFnDataGrpcMultiplexerTest { private static final Endpoints.ApiServiceDescriptor DESCRIPTOR = Endpoints.ApiServiceDescriptor.newBuilder().setUrl("test").build(); private static final LogicalEndpoint OUTPUT_LOCATION = LogicalEndpoint.of( "777L", BeamFnApi.Target.newBuilder() .setName("name") .setPrimitiveTransformReference("888L") .build()); private static final BeamFnApi.Elements ELEMENTS = BeamFnApi.Elements.newBuilder() .addData(BeamFnApi.Elements.Data.newBuilder() .setInstructionReference(OUTPUT_LOCATION.getInstructionId()) .setTarget(OUTPUT_LOCATION.getTarget()) .setData(ByteString.copyFrom(new byte[1]))) .build(); private static final BeamFnApi.Elements TERMINAL_ELEMENTS = BeamFnApi.Elements.newBuilder() .addData(BeamFnApi.Elements.Data.newBuilder() .setInstructionReference(OUTPUT_LOCATION.getInstructionId()) .setTarget(OUTPUT_LOCATION.getTarget())) .build(); @Test @Test public void testInboundObserverBlocksTillConsumerConnects() throws Exception { final Collection<BeamFnApi.Elements> outboundValues = new ArrayList<>(); final Collection<BeamFnApi.Elements.Data> inboundValues = new ArrayList<>(); final BeamFnDataGrpcMultiplexer multiplexer = new BeamFnDataGrpcMultiplexer( DESCRIPTOR, new StreamObserverClientFactory<Elements, Elements>() { @Override public StreamObserver<Elements> outboundObserverFor( StreamObserver<Elements> inboundObserver) { return TestStreams.withOnNext( new Consumer<Elements>() { @Override public void accept(Elements item) { outboundValues.add(item); } }) .build(); } }); ExecutorService executorService = Executors.newCachedThreadPool(); executorService.submit( new Runnable() { @Override public void run() { Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS); multiplexer.registerConsumer(OUTPUT_LOCATION, inboundValues::add); } }); multiplexer.getInboundObserver().onNext(ELEMENTS); assertTrue(multiplexer.hasConsumer(OUTPUT_LOCATION)); multiplexer.getInboundObserver().onNext(TERMINAL_ELEMENTS); assertFalse(multiplexer.hasConsumer(OUTPUT_LOCATION)); assertThat(inboundValues, contains(ELEMENTS.getData(0), TERMINAL_ELEMENTS.getData(0))); } }
class BeamFnDataGrpcMultiplexerTest { private static final Endpoints.ApiServiceDescriptor DESCRIPTOR = Endpoints.ApiServiceDescriptor.newBuilder().setUrl("test").build(); private static final LogicalEndpoint OUTPUT_LOCATION = LogicalEndpoint.of( "777L", BeamFnApi.Target.newBuilder() .setName("name") .setPrimitiveTransformReference("888L") .build()); private static final BeamFnApi.Elements ELEMENTS = BeamFnApi.Elements.newBuilder() .addData(BeamFnApi.Elements.Data.newBuilder() .setInstructionReference(OUTPUT_LOCATION.getInstructionId()) .setTarget(OUTPUT_LOCATION.getTarget()) .setData(ByteString.copyFrom(new byte[1]))) .build(); private static final BeamFnApi.Elements TERMINAL_ELEMENTS = BeamFnApi.Elements.newBuilder() .addData(BeamFnApi.Elements.Data.newBuilder() .setInstructionReference(OUTPUT_LOCATION.getInstructionId()) .setTarget(OUTPUT_LOCATION.getTarget())) .build(); @Test @Test public void testInboundObserverBlocksTillConsumerConnects() throws Exception { final Collection<BeamFnApi.Elements> outboundValues = new ArrayList<>(); final Collection<BeamFnApi.Elements.Data> inboundValues = new ArrayList<>(); final BeamFnDataGrpcMultiplexer multiplexer = new BeamFnDataGrpcMultiplexer( DESCRIPTOR, inboundObserver -> TestStreams.withOnNext(outboundValues::add).build()); ExecutorService executorService = Executors.newCachedThreadPool(); executorService.submit( () -> { Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS); multiplexer.registerConsumer(OUTPUT_LOCATION, inboundValues::add); }); multiplexer.getInboundObserver().onNext(ELEMENTS); assertTrue(multiplexer.hasConsumer(OUTPUT_LOCATION)); multiplexer.getInboundObserver().onNext(TERMINAL_ELEMENTS); assertFalse(multiplexer.hasConsumer(OUTPUT_LOCATION)); assertThat(inboundValues, contains(ELEMENTS.getData(0), TERMINAL_ELEMENTS.getData(0))); } }
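A minimal sketch of the equivalence behind the `values::add` suggestion: an anonymous `Consumer` (the old code's shape), the equivalent lambda, and the method reference all implement the same functional interface, so the shortest form can be substituted directly.

```java
// Sketch of anonymous class vs. lambda vs. method reference.
import java.util.ArrayList;
import java.util.Collection;
import java.util.function.Consumer;

public class MethodReferenceDemo {
    public static void main(String[] args) {
        Collection<String> values = new ArrayList<>();

        // 1) Anonymous class, as in the old code.
        Consumer<String> anon = new Consumer<String>() {
            @Override
            public void accept(String item) {
                values.add(item);
            }
        };

        // 2) Equivalent lambda.
        Consumer<String> lambda = item -> values.add(item);

        // 3) Equivalent method reference, as suggested by the reviewer.
        Consumer<String> ref = values::add;

        anon.accept("a");
        lambda.accept("b");
        ref.accept("c");
        System.out.println(values); // [a, b, c]
    }
}
```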
Not true: `orElseGet` can only be used to supply the value directly. In this situation, we need to get the configuration from the optional value, so it is unnecessary to mock the configuration when the rule is absent.
public void init(final ShardingSphereDatabase database, final SQLStatement sqlStatement) { Optional<ShardingRule> rule = database.getRuleMetaData().findSingleRule(ShardingRule.class); data = rule.map(shardingRule -> ((ShardingRuleConfiguration) shardingRule.getConfiguration()).getScaling().entrySet().iterator()).orElse(Collections.emptyIterator()); }
data = rule.map(shardingRule -> ((ShardingRuleConfiguration) shardingRule.getConfiguration()).getScaling().entrySet().iterator()).orElse(Collections.emptyIterator());
public void init(final ShardingSphereDatabase database, final SQLStatement sqlStatement) { Optional<ShardingRule> rule = database.getRuleMetaData().findSingleRule(ShardingRule.class); data = rule.map(optional -> ((ShardingRuleConfiguration) optional.getConfiguration()).getScaling().entrySet().iterator()).orElse(Collections.emptyIterator()); }
class ShardingScalingRulesQueryResultSet implements DistSQLResultSet { private Iterator<Entry<String, OnRuleAlteredActionConfiguration>> data; @Override @Override public Collection<String> getColumnNames() { return Arrays.asList("name", "input", "output", "stream_channel", "completion_detector", "data_consistency_checker"); } @Override public boolean next() { return data.hasNext(); } @Override public Collection<Object> getRowData() { return buildRowData(data.next()); } private Collection<Object> buildRowData(final Entry<String, OnRuleAlteredActionConfiguration> data) { Collection<Object> result = new LinkedList<>(); result.add(data.getKey()); OnRuleAlteredActionConfiguration shardingScalingRule = data.getValue(); result.add(null == shardingScalingRule ? "" : getString(shardingScalingRule.getInput())); result.add(null == shardingScalingRule ? "" : getString(shardingScalingRule.getOutput())); result.add(null == shardingScalingRule ? "" : getString(shardingScalingRule.getStreamChannel())); result.add(null == shardingScalingRule ? "" : getString(shardingScalingRule.getCompletionDetector())); result.add(null == shardingScalingRule ? "" : getString(shardingScalingRule.getDataConsistencyCalculator())); return result; } private String getString(final Object obj) { return null == obj ? "" : new Gson().toJson(obj); } @Override public String getType() { return ShowShardingScalingRulesStatement.class.getName(); } }
class ShardingScalingRulesQueryResultSet implements DistSQLResultSet { private Iterator<Entry<String, OnRuleAlteredActionConfiguration>> data; @Override @Override public Collection<String> getColumnNames() { return Arrays.asList("name", "input", "output", "stream_channel", "completion_detector", "data_consistency_checker"); } @Override public boolean next() { return data.hasNext(); } @Override public Collection<Object> getRowData() { return buildRowData(data.next()); } private Collection<Object> buildRowData(final Entry<String, OnRuleAlteredActionConfiguration> data) { Collection<Object> result = new LinkedList<>(); result.add(data.getKey()); OnRuleAlteredActionConfiguration shardingScalingRule = data.getValue(); result.add(null == shardingScalingRule ? "" : getString(shardingScalingRule.getInput())); result.add(null == shardingScalingRule ? "" : getString(shardingScalingRule.getOutput())); result.add(null == shardingScalingRule ? "" : getString(shardingScalingRule.getStreamChannel())); result.add(null == shardingScalingRule ? "" : getString(shardingScalingRule.getCompletionDetector())); result.add(null == shardingScalingRule ? "" : getString(shardingScalingRule.getDataConsistencyCalculator())); return result; } private String getString(final Object obj) { return null == obj ? "" : new Gson().toJson(obj); } @Override public String getType() { return ShowShardingScalingRulesStatement.class.getName(); } }
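The `orElse`/`orElseGet` distinction behind this exchange: `orElse` eagerly evaluates its argument even when the `Optional` is present, while `orElseGet` takes a `Supplier` that runs only when it is empty. Since `Collections.emptyIterator()` is a cheap constant, `orElse` is fine here. A minimal sketch with simplified types (a `List` stands in for the rule configuration):

```java
// Sketch of map(...).orElse(...) over an Optional; types are simplified.
import java.util.Arrays;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Optional;

public class OrElseDemo {
    public static void main(String[] args) {
        Optional<List<String>> rule = Optional.of(Arrays.asList("t_order"));

        Iterator<String> data = rule
                .map(List::iterator)                  // derive from the present value
                .orElse(Collections.emptyIterator()); // cheap constant fallback

        while (data.hasNext()) {
            System.out.println(data.next()); // t_order
        }
    }
}
```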
Okay @wgy8283335, I'll put `DeleteStatement` as a member in the `MergeStatement`, so that we can visit `deleteWhereClause` and put it inside the `DeleteStatement`'s `WhereSegment`.
public ASTNode visitMerge(final MergeContext ctx) { OracleMergeStatement result = new OracleMergeStatement(); result.setTarget((SimpleTableSegment) visit(ctx.intoClause())); result.setSource((TableSegment) visit(ctx.usingClause())); result.setExpr((ExpressionSegment) (visit(ctx.usingClause().expr()))); if (null != ctx.mergeUpdateClause()) { result.getUpdate().setSetAssignment((SetAssignmentSegment) visit(ctx.mergeUpdateClause().mergeSetAssignmentsClause())); if (null != ctx.mergeUpdateClause().whereClause()) { result.getUpdate().setWhere((WhereSegment) visit(ctx.mergeUpdateClause().whereClause())); } if (null != ctx.mergeUpdateClause().deleteWhereClause()) { result.getUpdate().setWhere((WhereSegment) visit(ctx.mergeUpdateClause().deleteWhereClause())); } } return result; }
result.getUpdate().setWhere((WhereSegment) visit(ctx.mergeUpdateClause().deleteWhereClause()));
public ASTNode visitMerge(final MergeContext ctx) { OracleMergeStatement result = new OracleMergeStatement(); result.setTarget((SimpleTableSegment) visit(ctx.intoClause())); result.setSource((TableSegment) visit(ctx.usingClause())); result.setExpr((ExpressionSegment) (visit(ctx.usingClause().expr()))); if (null != ctx.mergeUpdateClause()) { result.getUpdate().setSetAssignment((SetAssignmentSegment) visit(ctx.mergeUpdateClause().mergeSetAssignmentsClause())); if (null != ctx.mergeUpdateClause().whereClause()) { result.getUpdate().setWhere((WhereSegment) visit(ctx.mergeUpdateClause().whereClause())); } if (null != ctx.mergeUpdateClause().deleteWhereClause()) { result.getDelete().setWhere((WhereSegment) visit(ctx.mergeUpdateClause().deleteWhereClause())); } } return result; }
class OracleDMLStatementSQLVisitor extends OracleStatementSQLVisitor implements DMLSQLVisitor, SQLStatementVisitor { public OracleDMLStatementSQLVisitor(final Properties props) { super(props); } @Override public ASTNode visitInsert(final InsertContext ctx) { if (null != ctx.insertSingleTable()) { OracleInsertStatement result = (OracleInsertStatement) visit(ctx.insertSingleTable().insertValuesClause()); result.setTable((SimpleTableSegment) visit(ctx.insertSingleTable().insertIntoClause().tableName())); result.setParameterCount(getCurrentParameterIndex()); return result; } return new OracleInsertStatement(); } @SuppressWarnings("unchecked") @Override public ASTNode visitInsertValuesClause(final InsertValuesClauseContext ctx) { OracleInsertStatement result = new OracleInsertStatement(); if (null != ctx.columnNames()) { ColumnNamesContext columnNames = ctx.columnNames(); CollectionValue<ColumnSegment> columnSegments = (CollectionValue<ColumnSegment>) visit(columnNames); result.setInsertColumns(new InsertColumnsSegment(columnNames.start.getStartIndex(), columnNames.stop.getStopIndex(), columnSegments.getValue())); } else { result.setInsertColumns(new InsertColumnsSegment(ctx.start.getStartIndex() - 1, ctx.start.getStartIndex() - 1, Collections.emptyList())); } result.getValues().addAll(createInsertValuesSegments(ctx.assignmentValues())); return result; } private Collection<InsertValuesSegment> createInsertValuesSegments(final Collection<AssignmentValuesContext> assignmentValuesContexts) { Collection<InsertValuesSegment> result = new LinkedList<>(); for (AssignmentValuesContext each : assignmentValuesContexts) { result.add((InsertValuesSegment) visit(each)); } return result; } @Override public ASTNode visitUpdate(final UpdateContext ctx) { OracleUpdateStatement result = new OracleUpdateStatement(); result.setTableSegment((TableSegment) visit(ctx.tableReferences())); result.setSetAssignment((SetAssignmentSegment) visit(ctx.setAssignmentsClause())); if (null != ctx.whereClause()) { result.setWhere((WhereSegment) visit(ctx.whereClause())); } result.setParameterCount(getCurrentParameterIndex()); return result; } @Override public ASTNode visitSetAssignmentsClause(final SetAssignmentsClauseContext ctx) { Collection<AssignmentSegment> assignments = new LinkedList<>(); for (AssignmentContext each : ctx.assignment()) { assignments.add((AssignmentSegment) visit(each)); } return new SetAssignmentSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), assignments); } @Override public ASTNode visitAssignmentValues(final AssignmentValuesContext ctx) { List<ExpressionSegment> segments = new LinkedList<>(); for (AssignmentValueContext each : ctx.assignmentValue()) { segments.add((ExpressionSegment) visit(each)); } return new InsertValuesSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), segments); } @Override public ASTNode visitAssignment(final AssignmentContext ctx) { ColumnSegment column = (ColumnSegment) visitColumnName(ctx.columnName()); ExpressionSegment value = (ExpressionSegment) visit(ctx.assignmentValue()); return new AssignmentSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), column, value); } @Override public ASTNode visitAssignmentValue(final AssignmentValueContext ctx) { ExprContext expr = ctx.expr(); if (null != expr) { return visit(expr); } return new CommonExpressionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), ctx.getText()); } @Override public ASTNode visitDelete(final DeleteContext ctx) { OracleDeleteStatement 
result = new OracleDeleteStatement(); if (null != ctx.multipleTablesClause()) { result.setTableSegment((TableSegment) visit(ctx.multipleTablesClause())); } else { result.setTableSegment((TableSegment) visit(ctx.singleTableClause())); } if (null != ctx.whereClause()) { result.setWhere((WhereSegment) visit(ctx.whereClause())); } result.setParameterCount(getCurrentParameterIndex()); return result; } @Override public ASTNode visitSingleTableClause(final SingleTableClauseContext ctx) { SimpleTableSegment result = (SimpleTableSegment) visit(ctx.tableName()); if (null != ctx.alias()) { result.setAlias((AliasSegment) visit(ctx.alias())); } return result; } @Override public ASTNode visitMultipleTablesClause(final MultipleTablesClauseContext ctx) { DeleteMultiTableSegment result = new DeleteMultiTableSegment(); TableSegment relateTableSource = (TableSegment) visit(ctx.tableReferences()); result.setRelationTable(relateTableSource); result.setActualDeleteTables(generateTablesFromTableMultipleTableNames(ctx.multipleTableNames())); return result; } private List<SimpleTableSegment> generateTablesFromTableMultipleTableNames(final MultipleTableNamesContext ctx) { List<SimpleTableSegment> result = new LinkedList<>(); for (TableNameContext each : ctx.tableName()) { result.add((SimpleTableSegment) visit(each)); } return result; } @Override public ASTNode visitSelect(final SelectContext ctx) { OracleSelectStatement result = (OracleSelectStatement) visit(ctx.unionClause()); result.setParameterCount(getCurrentParameterIndex()); return result; } @Override public ASTNode visitUnionClause(final UnionClauseContext ctx) { return visit(ctx.selectClause(0)); } @Override public ASTNode visitSelectClause(final SelectClauseContext ctx) { OracleSelectStatement result = new OracleSelectStatement(); result.setProjections((ProjectionsSegment) visit(ctx.projections())); if (null != ctx.duplicateSpecification()) { result.getProjections().setDistinctRow(isDistinct(ctx)); } if (null != ctx.fromClause()) { TableSegment tableSegment = (TableSegment) visit(ctx.fromClause()); result.setFrom(tableSegment); } if (null != ctx.whereClause()) { result.setWhere((WhereSegment) visit(ctx.whereClause())); } if (null != ctx.groupByClause()) { result.setGroupBy((GroupBySegment) visit(ctx.groupByClause())); } if (null != ctx.orderByClause()) { result.setOrderBy((OrderBySegment) visit(ctx.orderByClause())); } if (null != ctx.lockClause()) { result.setLock((LockSegment) visit(ctx.lockClause())); } return result; } private boolean isDistinct(final SelectClauseContext ctx) { return ((BooleanLiteralValue) visit(ctx.duplicateSpecification())).getValue(); } @Override public ASTNode visitDuplicateSpecification(final DuplicateSpecificationContext ctx) { return new BooleanLiteralValue(null != ctx.DISTINCT()); } @Override public ASTNode visitProjections(final ProjectionsContext ctx) { Collection<ProjectionSegment> projections = new LinkedList<>(); if (null != ctx.unqualifiedShorthand()) { projections.add(new ShorthandProjectionSegment(ctx.unqualifiedShorthand().getStart().getStartIndex(), ctx.unqualifiedShorthand().getStop().getStopIndex())); } for (ProjectionContext each : ctx.projection()) { projections.add((ProjectionSegment) visit(each)); } ProjectionsSegment result = new ProjectionsSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex()); result.getProjections().addAll(projections); return result; } @Override public ASTNode visitProjection(final ProjectionContext ctx) { if (null != ctx.qualifiedShorthand()) { QualifiedShorthandContext 
shorthand = ctx.qualifiedShorthand(); ShorthandProjectionSegment result = new ShorthandProjectionSegment(shorthand.getStart().getStartIndex(), shorthand.getStop().getStopIndex()); IdentifierValue identifier = new IdentifierValue(shorthand.identifier().getText()); result.setOwner(new OwnerSegment(shorthand.identifier().getStart().getStartIndex(), shorthand.identifier().getStop().getStopIndex(), identifier)); return result; } AliasSegment alias = null == ctx.alias() ? null : (AliasSegment) visit(ctx.alias()); if (null != ctx.columnName()) { ColumnSegment column = (ColumnSegment) visit(ctx.columnName()); ColumnProjectionSegment result = new ColumnProjectionSegment(column); result.setAlias(alias); return result; } return createProjection(ctx, alias); } @Override public ASTNode visitAlias(final AliasContext ctx) { if (null != ctx.identifier()) { return new AliasSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), (IdentifierValue) visit(ctx.identifier())); } return new AliasSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), new IdentifierValue(ctx.STRING_().getText())); } private ASTNode createProjection(final ProjectionContext ctx, final AliasSegment alias) { ASTNode projection = visit(ctx.expr()); if (projection instanceof AggregationProjectionSegment) { ((AggregationProjectionSegment) projection).setAlias(alias); return projection; } if (projection instanceof ExpressionProjectionSegment) { ((ExpressionProjectionSegment) projection).setAlias(alias); return projection; } if (projection instanceof CommonExpressionSegment) { CommonExpressionSegment segment = (CommonExpressionSegment) projection; ExpressionProjectionSegment result = new ExpressionProjectionSegment(segment.getStartIndex(), segment.getStopIndex(), segment.getText()); result.setAlias(alias); return result; } if (projection instanceof ColumnSegment) { ColumnProjectionSegment result = new ColumnProjectionSegment((ColumnSegment) projection); result.setAlias(alias); return result; } if (projection instanceof SubqueryExpressionSegment) { SubqueryProjectionSegment result = new SubqueryProjectionSegment(((SubqueryExpressionSegment) projection).getSubquery()); result.setAlias(alias); return result; } if (projection instanceof BinaryOperationExpression) { int startIndex = ((BinaryOperationExpression) projection).getStartIndex(); int stopIndex = null != alias ? alias.getStopIndex() : ((BinaryOperationExpression) projection).getStopIndex(); ExpressionProjectionSegment result = new ExpressionProjectionSegment(startIndex, stopIndex, ((BinaryOperationExpression) projection).getText()); result.setAlias(alias); return result; } LiteralExpressionSegment column = (LiteralExpressionSegment) projection; ExpressionProjectionSegment result = null == alias ? 
new ExpressionProjectionSegment(column.getStartIndex(), column.getStopIndex(), String.valueOf(column.getLiterals())) : new ExpressionProjectionSegment(column.getStartIndex(), ctx.alias().stop.getStopIndex(), String.valueOf(column.getLiterals())); result.setAlias(alias); return result; } @Override public ASTNode visitFromClause(final FromClauseContext ctx) { return visit(ctx.tableReferences()); } @Override public ASTNode visitTableReferences(final TableReferencesContext ctx) { TableSegment result = (TableSegment) visit(ctx.tableReference(0)); if (ctx.tableReference().size() > 1) { for (int i = 1; i < ctx.tableReference().size(); i++) { result = generateJoinTableSourceFromTableReference(ctx.tableReference(i), result); } } return result; } private JoinTableSegment generateJoinTableSourceFromTableReference(final TableReferenceContext ctx, final TableSegment tableSegment) { JoinTableSegment result = new JoinTableSegment(); result.setStartIndex(tableSegment.getStartIndex()); result.setStopIndex(ctx.stop.getStopIndex()); result.setLeft(tableSegment); result.setRight((TableSegment) visit(ctx)); return result; } @Override public ASTNode visitTableReference(final TableReferenceContext ctx) { TableSegment result; TableSegment left; left = (TableSegment) visit(ctx.tableFactor()); if (!ctx.joinedTable().isEmpty()) { for (JoinedTableContext each : ctx.joinedTable()) { left = visitJoinedTable(each, left); } } result = left; return result; } @Override public ASTNode visitTableFactor(final TableFactorContext ctx) { if (null != ctx.subquery()) { OracleSelectStatement subquery = (OracleSelectStatement) visit(ctx.subquery()); SubquerySegment subquerySegment = new SubquerySegment(ctx.subquery().start.getStartIndex(), ctx.subquery().stop.getStopIndex(), subquery); SubqueryTableSegment result = new SubqueryTableSegment(subquerySegment); if (null != ctx.alias()) { result.setAlias((AliasSegment) visit(ctx.alias())); } return result; } if (null != ctx.tableName()) { SimpleTableSegment result = (SimpleTableSegment) visit(ctx.tableName()); if (null != ctx.alias()) { result.setAlias((AliasSegment) visit(ctx.alias())); } return result; } return visit(ctx.tableReferences()); } private JoinTableSegment visitJoinedTable(final JoinedTableContext ctx, final TableSegment tableSegment) { JoinTableSegment result = new JoinTableSegment(); result.setLeft(tableSegment); result.setStartIndex(tableSegment.getStartIndex()); result.setStopIndex(ctx.stop.getStopIndex()); TableSegment right = (TableSegment) visit(ctx.tableFactor()); result.setRight(right); if (null != ctx.joinSpecification()) { result = visitJoinSpecification(ctx.joinSpecification(), result); } return result; } private JoinTableSegment visitJoinSpecification(final JoinSpecificationContext ctx, final JoinTableSegment joinTableSource) { if (null != ctx.expr()) { ExpressionSegment condition = (ExpressionSegment) visit(ctx.expr()); joinTableSource.setCondition(condition); } if (null != ctx.USING()) { List<ColumnSegment> columnSegmentList = new LinkedList<>(); for (ColumnNameContext cname : ctx.columnNames().columnName()) { columnSegmentList.add((ColumnSegment) visit(cname)); } joinTableSource.setUsing(columnSegmentList); } return joinTableSource; } @Override public ASTNode visitWhereClause(final WhereClauseContext ctx) { ASTNode segment = visit(ctx.expr()); return new WhereSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), (ExpressionSegment) segment); } @Override public ASTNode visitGroupByClause(final GroupByClauseContext ctx) { 
Collection<OrderByItemSegment> items = new LinkedList<>(); for (OrderByItemContext each : ctx.orderByItem()) { items.add((OrderByItemSegment) visit(each)); } return new GroupBySegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), items); } @Override public ASTNode visitSubquery(final SubqueryContext ctx) { return visit(ctx.unionClause()); } @Override public ASTNode visitLockClause(final LockClauseContext ctx) { return new LockSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex()); } @Override public ASTNode visitIntoClause(final IntoClauseContext ctx) { if (null != ctx.tableName()) { SimpleTableSegment result = (SimpleTableSegment) visit(ctx.tableName()); if (null != ctx.alias()) { result.setAlias((AliasSegment) visit(ctx.alias())); } return result; } SimpleTableSegment result = (SimpleTableSegment) visit(ctx.viewName()); if (null != ctx.alias()) { result.setAlias((AliasSegment) visit(ctx.alias())); } return result; } @Override public ASTNode visitUsingClause(final UsingClauseContext ctx) { if (null != ctx.tableName()) { SimpleTableSegment result = (SimpleTableSegment) visit(ctx.tableName()); if (null != ctx.alias()) { result.setAlias((AliasSegment) visit(ctx.alias())); } return result; } if (null != ctx.viewName()) { SimpleTableSegment result = (SimpleTableSegment) visit(ctx.viewName()); if (null != ctx.alias()) { result.setAlias((AliasSegment) visit(ctx.alias())); } return result; } OracleSelectStatement subquery = (OracleSelectStatement) visit(ctx.subquery()); SubquerySegment subquerySegment = new SubquerySegment(ctx.subquery().start.getStartIndex(), ctx.subquery().stop.getStopIndex(), subquery); SubqueryTableSegment result = new SubqueryTableSegment(subquerySegment); if (null != ctx.alias()) { result.setAlias((AliasSegment) visit(ctx.alias())); } return result; } @Override public ASTNode visitMergeUpdateClause(final MergeUpdateClauseContext ctx) { OracleUpdateStatement result = new OracleUpdateStatement(); result.setSetAssignment((SetAssignmentSegment) visit(ctx.mergeSetAssignmentsClause())); if (null != ctx.whereClause()) { result.setWhere((WhereSegment) visit(ctx.whereClause())); } if (null != ctx.deleteWhereClause()) { result.setWhere((WhereSegment) visit(ctx.deleteWhereClause())); } return result; } @Override public ASTNode visitMergeSetAssignmentsClause(final MergeSetAssignmentsClauseContext ctx) { Collection<AssignmentSegment> assignments = new LinkedList<>(); for (MergeAssignmentContext each : ctx.mergeAssignment()) { assignments.add((AssignmentSegment) visit(each)); } return new SetAssignmentSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), assignments); } @Override public ASTNode visitMergeAssignment(final MergeAssignmentContext ctx) { ColumnSegment column = (ColumnSegment) visitColumnName(ctx.columnName()); ExpressionSegment value = (ExpressionSegment) visit(ctx.mergeAssignmentValue()); return new AssignmentSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), column, value); } @Override public ASTNode visitMergeAssignmentValue(final MergeAssignmentValueContext ctx) { ExprContext expr = ctx.expr(); if (null != expr) { return visit(expr); } return new CommonExpressionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), ctx.getText()); } @Override public ASTNode visitDeleteWhereClause(final DeleteWhereClauseContext ctx) { ASTNode segment = visit(ctx.whereClause().expr()); return new WhereSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), (ExpressionSegment) segment); 
} }
class OracleDMLStatementSQLVisitor extends OracleStatementSQLVisitor implements DMLSQLVisitor, SQLStatementVisitor { public OracleDMLStatementSQLVisitor(final Properties props) { super(props); } @Override public ASTNode visitInsert(final InsertContext ctx) { if (null != ctx.insertSingleTable()) { OracleInsertStatement result = (OracleInsertStatement) visit(ctx.insertSingleTable().insertValuesClause()); result.setTable((SimpleTableSegment) visit(ctx.insertSingleTable().insertIntoClause().tableName())); result.setParameterCount(getCurrentParameterIndex()); return result; } return new OracleInsertStatement(); } @SuppressWarnings("unchecked") @Override public ASTNode visitInsertValuesClause(final InsertValuesClauseContext ctx) { OracleInsertStatement result = new OracleInsertStatement(); if (null != ctx.columnNames()) { ColumnNamesContext columnNames = ctx.columnNames(); CollectionValue<ColumnSegment> columnSegments = (CollectionValue<ColumnSegment>) visit(columnNames); result.setInsertColumns(new InsertColumnsSegment(columnNames.start.getStartIndex(), columnNames.stop.getStopIndex(), columnSegments.getValue())); } else { result.setInsertColumns(new InsertColumnsSegment(ctx.start.getStartIndex() - 1, ctx.start.getStartIndex() - 1, Collections.emptyList())); } result.getValues().addAll(createInsertValuesSegments(ctx.assignmentValues())); return result; } private Collection<InsertValuesSegment> createInsertValuesSegments(final Collection<AssignmentValuesContext> assignmentValuesContexts) { Collection<InsertValuesSegment> result = new LinkedList<>(); for (AssignmentValuesContext each : assignmentValuesContexts) { result.add((InsertValuesSegment) visit(each)); } return result; } @Override public ASTNode visitUpdate(final UpdateContext ctx) { OracleUpdateStatement result = new OracleUpdateStatement(); result.setTableSegment((TableSegment) visit(ctx.tableReferences())); result.setSetAssignment((SetAssignmentSegment) visit(ctx.setAssignmentsClause())); if (null != ctx.whereClause()) { result.setWhere((WhereSegment) visit(ctx.whereClause())); } result.setParameterCount(getCurrentParameterIndex()); return result; } @Override public ASTNode visitSetAssignmentsClause(final SetAssignmentsClauseContext ctx) { Collection<AssignmentSegment> assignments = new LinkedList<>(); for (AssignmentContext each : ctx.assignment()) { assignments.add((AssignmentSegment) visit(each)); } return new SetAssignmentSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), assignments); } @Override public ASTNode visitAssignmentValues(final AssignmentValuesContext ctx) { List<ExpressionSegment> segments = new LinkedList<>(); for (AssignmentValueContext each : ctx.assignmentValue()) { segments.add((ExpressionSegment) visit(each)); } return new InsertValuesSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), segments); } @Override public ASTNode visitAssignment(final AssignmentContext ctx) { ColumnSegment column = (ColumnSegment) visitColumnName(ctx.columnName()); ExpressionSegment value = (ExpressionSegment) visit(ctx.assignmentValue()); return new AssignmentSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), column, value); } @Override public ASTNode visitAssignmentValue(final AssignmentValueContext ctx) { ExprContext expr = ctx.expr(); if (null != expr) { return visit(expr); } return new CommonExpressionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), ctx.getText()); } @Override public ASTNode visitDelete(final DeleteContext ctx) { OracleDeleteStatement 
result = new OracleDeleteStatement(); if (null != ctx.multipleTablesClause()) { result.setTableSegment((TableSegment) visit(ctx.multipleTablesClause())); } else { result.setTableSegment((TableSegment) visit(ctx.singleTableClause())); } if (null != ctx.whereClause()) { result.setWhere((WhereSegment) visit(ctx.whereClause())); } result.setParameterCount(getCurrentParameterIndex()); return result; } @Override public ASTNode visitSingleTableClause(final SingleTableClauseContext ctx) { SimpleTableSegment result = (SimpleTableSegment) visit(ctx.tableName()); if (null != ctx.alias()) { result.setAlias((AliasSegment) visit(ctx.alias())); } return result; } @Override public ASTNode visitMultipleTablesClause(final MultipleTablesClauseContext ctx) { DeleteMultiTableSegment result = new DeleteMultiTableSegment(); TableSegment relateTableSource = (TableSegment) visit(ctx.tableReferences()); result.setRelationTable(relateTableSource); result.setActualDeleteTables(generateTablesFromTableMultipleTableNames(ctx.multipleTableNames())); return result; } private List<SimpleTableSegment> generateTablesFromTableMultipleTableNames(final MultipleTableNamesContext ctx) { List<SimpleTableSegment> result = new LinkedList<>(); for (TableNameContext each : ctx.tableName()) { result.add((SimpleTableSegment) visit(each)); } return result; } @Override public ASTNode visitSelect(final SelectContext ctx) { OracleSelectStatement result = (OracleSelectStatement) visit(ctx.unionClause()); result.setParameterCount(getCurrentParameterIndex()); return result; } @Override public ASTNode visitUnionClause(final UnionClauseContext ctx) { return visit(ctx.selectClause(0)); } @Override public ASTNode visitSelectClause(final SelectClauseContext ctx) { OracleSelectStatement result = new OracleSelectStatement(); result.setProjections((ProjectionsSegment) visit(ctx.projections())); if (null != ctx.duplicateSpecification()) { result.getProjections().setDistinctRow(isDistinct(ctx)); } if (null != ctx.fromClause()) { TableSegment tableSegment = (TableSegment) visit(ctx.fromClause()); result.setFrom(tableSegment); } if (null != ctx.whereClause()) { result.setWhere((WhereSegment) visit(ctx.whereClause())); } if (null != ctx.groupByClause()) { result.setGroupBy((GroupBySegment) visit(ctx.groupByClause())); } if (null != ctx.orderByClause()) { result.setOrderBy((OrderBySegment) visit(ctx.orderByClause())); } if (null != ctx.lockClause()) { result.setLock((LockSegment) visit(ctx.lockClause())); } return result; } private boolean isDistinct(final SelectClauseContext ctx) { return ((BooleanLiteralValue) visit(ctx.duplicateSpecification())).getValue(); } @Override public ASTNode visitDuplicateSpecification(final DuplicateSpecificationContext ctx) { return new BooleanLiteralValue(null != ctx.DISTINCT()); } @Override public ASTNode visitProjections(final ProjectionsContext ctx) { Collection<ProjectionSegment> projections = new LinkedList<>(); if (null != ctx.unqualifiedShorthand()) { projections.add(new ShorthandProjectionSegment(ctx.unqualifiedShorthand().getStart().getStartIndex(), ctx.unqualifiedShorthand().getStop().getStopIndex())); } for (ProjectionContext each : ctx.projection()) { projections.add((ProjectionSegment) visit(each)); } ProjectionsSegment result = new ProjectionsSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex()); result.getProjections().addAll(projections); return result; } @Override public ASTNode visitProjection(final ProjectionContext ctx) { if (null != ctx.qualifiedShorthand()) { QualifiedShorthandContext 
shorthand = ctx.qualifiedShorthand(); ShorthandProjectionSegment result = new ShorthandProjectionSegment(shorthand.getStart().getStartIndex(), shorthand.getStop().getStopIndex()); IdentifierValue identifier = new IdentifierValue(shorthand.identifier().getText()); result.setOwner(new OwnerSegment(shorthand.identifier().getStart().getStartIndex(), shorthand.identifier().getStop().getStopIndex(), identifier)); return result; } AliasSegment alias = null == ctx.alias() ? null : (AliasSegment) visit(ctx.alias()); if (null != ctx.columnName()) { ColumnSegment column = (ColumnSegment) visit(ctx.columnName()); ColumnProjectionSegment result = new ColumnProjectionSegment(column); result.setAlias(alias); return result; } return createProjection(ctx, alias); } @Override public ASTNode visitAlias(final AliasContext ctx) { if (null != ctx.identifier()) { return new AliasSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), (IdentifierValue) visit(ctx.identifier())); } return new AliasSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), new IdentifierValue(ctx.STRING_().getText())); } private ASTNode createProjection(final ProjectionContext ctx, final AliasSegment alias) { ASTNode projection = visit(ctx.expr()); if (projection instanceof AggregationProjectionSegment) { ((AggregationProjectionSegment) projection).setAlias(alias); return projection; } if (projection instanceof ExpressionProjectionSegment) { ((ExpressionProjectionSegment) projection).setAlias(alias); return projection; } if (projection instanceof CommonExpressionSegment) { CommonExpressionSegment segment = (CommonExpressionSegment) projection; ExpressionProjectionSegment result = new ExpressionProjectionSegment(segment.getStartIndex(), segment.getStopIndex(), segment.getText()); result.setAlias(alias); return result; } if (projection instanceof ColumnSegment) { ColumnProjectionSegment result = new ColumnProjectionSegment((ColumnSegment) projection); result.setAlias(alias); return result; } if (projection instanceof SubqueryExpressionSegment) { SubqueryProjectionSegment result = new SubqueryProjectionSegment(((SubqueryExpressionSegment) projection).getSubquery()); result.setAlias(alias); return result; } if (projection instanceof BinaryOperationExpression) { int startIndex = ((BinaryOperationExpression) projection).getStartIndex(); int stopIndex = null != alias ? alias.getStopIndex() : ((BinaryOperationExpression) projection).getStopIndex(); ExpressionProjectionSegment result = new ExpressionProjectionSegment(startIndex, stopIndex, ((BinaryOperationExpression) projection).getText()); result.setAlias(alias); return result; } LiteralExpressionSegment column = (LiteralExpressionSegment) projection; ExpressionProjectionSegment result = null == alias ? 
new ExpressionProjectionSegment(column.getStartIndex(), column.getStopIndex(), String.valueOf(column.getLiterals())) : new ExpressionProjectionSegment(column.getStartIndex(), ctx.alias().stop.getStopIndex(), String.valueOf(column.getLiterals())); result.setAlias(alias); return result; } @Override public ASTNode visitFromClause(final FromClauseContext ctx) { return visit(ctx.tableReferences()); } @Override public ASTNode visitTableReferences(final TableReferencesContext ctx) { TableSegment result = (TableSegment) visit(ctx.tableReference(0)); if (ctx.tableReference().size() > 1) { for (int i = 1; i < ctx.tableReference().size(); i++) { result = generateJoinTableSourceFromTableReference(ctx.tableReference(i), result); } } return result; } private JoinTableSegment generateJoinTableSourceFromTableReference(final TableReferenceContext ctx, final TableSegment tableSegment) { JoinTableSegment result = new JoinTableSegment(); result.setStartIndex(tableSegment.getStartIndex()); result.setStopIndex(ctx.stop.getStopIndex()); result.setLeft(tableSegment); result.setRight((TableSegment) visit(ctx)); return result; } @Override public ASTNode visitTableReference(final TableReferenceContext ctx) { TableSegment result; TableSegment left; left = (TableSegment) visit(ctx.tableFactor()); if (!ctx.joinedTable().isEmpty()) { for (JoinedTableContext each : ctx.joinedTable()) { left = visitJoinedTable(each, left); } } result = left; return result; } @Override public ASTNode visitTableFactor(final TableFactorContext ctx) { if (null != ctx.subquery()) { OracleSelectStatement subquery = (OracleSelectStatement) visit(ctx.subquery()); SubquerySegment subquerySegment = new SubquerySegment(ctx.subquery().start.getStartIndex(), ctx.subquery().stop.getStopIndex(), subquery); SubqueryTableSegment result = new SubqueryTableSegment(subquerySegment); if (null != ctx.alias()) { result.setAlias((AliasSegment) visit(ctx.alias())); } return result; } if (null != ctx.tableName()) { SimpleTableSegment result = (SimpleTableSegment) visit(ctx.tableName()); if (null != ctx.alias()) { result.setAlias((AliasSegment) visit(ctx.alias())); } return result; } return visit(ctx.tableReferences()); } private JoinTableSegment visitJoinedTable(final JoinedTableContext ctx, final TableSegment tableSegment) { JoinTableSegment result = new JoinTableSegment(); result.setLeft(tableSegment); result.setStartIndex(tableSegment.getStartIndex()); result.setStopIndex(ctx.stop.getStopIndex()); TableSegment right = (TableSegment) visit(ctx.tableFactor()); result.setRight(right); if (null != ctx.joinSpecification()) { result = visitJoinSpecification(ctx.joinSpecification(), result); } return result; } private JoinTableSegment visitJoinSpecification(final JoinSpecificationContext ctx, final JoinTableSegment joinTableSource) { if (null != ctx.expr()) { ExpressionSegment condition = (ExpressionSegment) visit(ctx.expr()); joinTableSource.setCondition(condition); } if (null != ctx.USING()) { List<ColumnSegment> columnSegmentList = new LinkedList<>(); for (ColumnNameContext cname : ctx.columnNames().columnName()) { columnSegmentList.add((ColumnSegment) visit(cname)); } joinTableSource.setUsing(columnSegmentList); } return joinTableSource; } @Override public ASTNode visitWhereClause(final WhereClauseContext ctx) { ASTNode segment = visit(ctx.expr()); return new WhereSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), (ExpressionSegment) segment); } @Override public ASTNode visitGroupByClause(final GroupByClauseContext ctx) { 
Collection<OrderByItemSegment> items = new LinkedList<>(); for (OrderByItemContext each : ctx.orderByItem()) { items.add((OrderByItemSegment) visit(each)); } return new GroupBySegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), items); } @Override public ASTNode visitSubquery(final SubqueryContext ctx) { return visit(ctx.unionClause()); } @Override public ASTNode visitLockClause(final LockClauseContext ctx) { return new LockSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex()); } @Override public ASTNode visitIntoClause(final IntoClauseContext ctx) { if (null != ctx.tableName()) { SimpleTableSegment result = (SimpleTableSegment) visit(ctx.tableName()); if (null != ctx.alias()) { result.setAlias((AliasSegment) visit(ctx.alias())); } return result; } SimpleTableSegment result = (SimpleTableSegment) visit(ctx.viewName()); if (null != ctx.alias()) { result.setAlias((AliasSegment) visit(ctx.alias())); } return result; } @Override public ASTNode visitUsingClause(final UsingClauseContext ctx) { if (null != ctx.tableName()) { SimpleTableSegment result = (SimpleTableSegment) visit(ctx.tableName()); if (null != ctx.alias()) { result.setAlias((AliasSegment) visit(ctx.alias())); } return result; } if (null != ctx.viewName()) { SimpleTableSegment result = (SimpleTableSegment) visit(ctx.viewName()); if (null != ctx.alias()) { result.setAlias((AliasSegment) visit(ctx.alias())); } return result; } OracleSelectStatement subquery = (OracleSelectStatement) visit(ctx.subquery()); SubquerySegment subquerySegment = new SubquerySegment(ctx.subquery().start.getStartIndex(), ctx.subquery().stop.getStopIndex(), subquery); SubqueryTableSegment result = new SubqueryTableSegment(subquerySegment); if (null != ctx.alias()) { result.setAlias((AliasSegment) visit(ctx.alias())); } return result; } @Override public ASTNode visitMergeUpdateClause(final MergeUpdateClauseContext ctx) { OracleMergeStatement result = new OracleMergeStatement(); result.getUpdate().setSetAssignment((SetAssignmentSegment) visit(ctx.mergeSetAssignmentsClause())); if (null != ctx.whereClause()) { result.getUpdate().setWhere((WhereSegment) visit(ctx.whereClause())); } if (null != ctx.deleteWhereClause()) { result.getDelete().setWhere((WhereSegment) visit(ctx.deleteWhereClause())); } return result; } @Override public ASTNode visitMergeSetAssignmentsClause(final MergeSetAssignmentsClauseContext ctx) { Collection<AssignmentSegment> assignments = new LinkedList<>(); for (MergeAssignmentContext each : ctx.mergeAssignment()) { assignments.add((AssignmentSegment) visit(each)); } return new SetAssignmentSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), assignments); } @Override public ASTNode visitMergeAssignment(final MergeAssignmentContext ctx) { ColumnSegment column = (ColumnSegment) visitColumnName(ctx.columnName()); ExpressionSegment value = (ExpressionSegment) visit(ctx.mergeAssignmentValue()); return new AssignmentSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), column, value); } @Override public ASTNode visitMergeAssignmentValue(final MergeAssignmentValueContext ctx) { ExprContext expr = ctx.expr(); if (null != expr) { return visit(expr); } return new CommonExpressionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), ctx.getText()); } @Override public ASTNode visitDeleteWhereClause(final DeleteWhereClauseContext ctx) { ASTNode segment = visit(ctx.whereClause().expr()); return new WhereSegment(ctx.getStart().getStartIndex(), 
ctx.getStop().getStopIndex(), (ExpressionSegment) segment); } }
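As the review comment and the before/after pair above show, the fix hangs a `DeleteStatement` off the merge statement, so the `deleteWhereClause` no longer overwrites the update's `WHERE`. A schematic sketch of that ownership structure, with simplified stand-in types rather than the real ShardingSphere segment classes:

```java
// Schematic sketch of the design agreed in the review above: the merge statement owns
// both an update and a delete sub-statement, so the DELETE WHERE clause is stored on the
// delete's own where-segment instead of overwriting the update's. The types below are
// simplified stand-ins, not the real ShardingSphere segment classes.
final class WhereSegment {
    final String expr;
    WhereSegment(final String expr) { this.expr = expr; }
}

final class UpdateStatement {
    private WhereSegment where;
    void setWhere(final WhereSegment where) { this.where = where; }
    WhereSegment getWhere() { return where; }
}

final class DeleteStatement {
    private WhereSegment where;
    void setWhere(final WhereSegment where) { this.where = where; }
    WhereSegment getWhere() { return where; }
}

final class MergeStatement {
    private final UpdateStatement update = new UpdateStatement();
    private final DeleteStatement delete = new DeleteStatement();
    UpdateStatement getUpdate() { return update; }
    DeleteStatement getDelete() { return delete; }
}

public final class MergeVisitDemo {
    public static void main(final String[] args) {
        MergeStatement merge = new MergeStatement();
        // The UPDATE branch's WHERE and the DELETE branch's WHERE no longer collide.
        merge.getUpdate().setWhere(new WhereSegment("status = 'OPEN'"));
        merge.getDelete().setWhere(new WhereSegment("status = 'CLOSED'"));
        System.out.println(merge.getUpdate().getWhere().expr);
        System.out.println(merge.getDelete().getWhere().expr);
    }
}
```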
Refactored to remove `requestStartTime` from the dataclass
private void flush() throws InterruptedException { RequestInfo requestInfo = createRequestInfo(); while (rateLimitingStrategy.shouldBlock(requestInfo)) { mailboxExecutor.yield(); requestInfo = createRequestInfo(); } List<RequestEntryT> batch = createNextAvailableBatch(requestInfo); int batchSize = batch.size(); if (batchSize == 0) { return; } requestInfo.setBatchSize(batchSize); long timestampOfRequest = System.currentTimeMillis(); requestInfo.setRequestStartTime(timestampOfRequest); Consumer<List<RequestEntryT>> requestResultCallback = failedRequestEntries -> mailboxExecutor.execute( () -> completeRequest( failedRequestEntries, batchSize, timestampOfRequest), "Mark in-flight request as completed and requeue %d request entries", failedRequestEntries.size()); rateLimitingStrategy.registerInFlightRequest(requestInfo); inFlightRequestsCount++; submitRequestEntries(batch, requestResultCallback); }
requestInfo.setRequestStartTime(timestampOfRequest);
private void flush() throws InterruptedException { RequestInfo requestInfo = createRequestInfo(); while (rateLimitingStrategy.shouldBlock(requestInfo)) { mailboxExecutor.yield(); requestInfo = createRequestInfo(); } List<RequestEntryT> batch = createNextAvailableBatch(requestInfo); if (batch.size() == 0) { return; } int batchSize = requestInfo.getBatchSize(); long requestTimestamp = System.currentTimeMillis(); Consumer<List<RequestEntryT>> requestToRetry = failedRequestEntries -> mailboxExecutor.execute( () -> completeRequest( failedRequestEntries, batchSize, requestTimestamp), "Mark in-flight request as completed and requeue %d request entries", failedRequestEntries.size()); rateLimitingStrategy.registerInFlightRequest(requestInfo); inFlightRequestsCount++; submitRequestEntries(batch, requestToRetry); }
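The refactored `flush()` above drops `setRequestStartTime` from `RequestInfo` and instead captures the timestamp in a local variable that the completion callback closes over. A minimal sketch of that closure-capture pattern, with illustrative names rather than the actual Flink `AsyncSinkWriter` API:

```java
import java.util.List;
import java.util.function.Consumer;

// Minimal sketch of the closure-capture pattern from the refactored flush() above:
// the request start time lives in a local variable that the completion callback closes
// over, so the RequestInfo dataclass no longer needs a requestStartTime field. Names
// here are illustrative, not the actual Flink AsyncSinkWriter API.
public final class ClosureTimestampDemo {

    static Consumer<List<String>> completionCallback(final int batchSize) {
        final long requestTimestamp = System.currentTimeMillis(); // captured once, at submit time
        return failedEntries ->
                System.out.printf("batch=%d started=%d failed=%d%n",
                        batchSize, requestTimestamp, failedEntries.size());
    }

    public static void main(final String[] args) {
        completionCallback(3).accept(List.of("a"));
    }
}
```

Because the lambda captures the effectively-final local, each in-flight request keeps its own start time without any mutable state on the shared dataclass.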
class AsyncSinkWriter<InputT, RequestEntryT extends Serializable> implements StatefulSink.StatefulSinkWriter<InputT, BufferedRequestState<RequestEntryT>> { private final MailboxExecutor mailboxExecutor; private final ProcessingTimeService timeService; /* The timestamp of the previous batch of records was sent from this sink. */ private long lastSendTimestamp = 0; /* The timestamp of the response to the previous request from this sink. */ private long ackTime = Long.MAX_VALUE; /* The sink writer metric group. */ private final SinkWriterMetricGroup metrics; /* Counter for number of bytes this sink has attempted to send to the destination. */ private final Counter numBytesSendCounter; /* Counter for number of records this sink has attempted to send to the destination. */ private final Counter numRecordsSendCounter; private final RateLimitingStrategy rateLimitingStrategy; private final int maxBatchSize; private final int maxBufferedRequests; private final long maxBatchSizeInBytes; private final long maxTimeInBufferMS; private final long maxRecordSizeInBytes; /** * The ElementConverter provides a mapping between for the elements of a stream to request * entries that can be sent to the destination. * * <p>The resulting request entry is buffered by the AsyncSinkWriter and sent to the destination * when the {@code submitRequestEntries} method is invoked. */ private final ElementConverter<InputT, RequestEntryT> elementConverter; /** * Buffer to hold request entries that should be persisted into the destination, along with its * size in bytes. * * <p>A request entry contain all relevant details to make a call to the destination. Eg, for * Kinesis Data Streams a request entry contains the payload and partition key. * * <p>It seems more natural to buffer InputT, ie, the events that should be persisted, rather * than RequestEntryT. However, in practice, the response of a failed request call can make it * very hard, if not impossible, to reconstruct the original event. It is much easier, to just * construct a new (retry) request entry from the response and add that back to the queue for * later retry. */ private final Deque<RequestEntryWrapper<RequestEntryT>> bufferedRequestEntries = new ArrayDeque<>(); /** * Tracks all pending async calls that have been executed since the last checkpoint. Calls that * completed (successfully or unsuccessfully) are automatically decrementing the counter. Any * request entry that was not successfully persisted needs to be handled and retried by the * logic in {@code submitRequestsToApi}. * * <p>To complete a checkpoint, we need to make sure that no requests are in flight, as they may * fail, which could then lead to data loss. */ private int inFlightRequestsCount; /** * Tracks the cumulative size of all elements in {@code bufferedRequestEntries} to facilitate * the criterion for flushing after {@code maxBatchSizeInBytes} is reached. */ private double bufferedRequestEntriesTotalSizeInBytes; private boolean existsActiveTimerCallback = false; /** * The {@code accept} method should be called on this Consumer if the processing of the {@code * requestEntries} raises an exception that should not be retried. Specifically, any action that * we are sure will result in the same exception no matter how many times we retry should raise * a {@code RuntimeException} here. For example, wrong user credentials. However, it is possible * intermittent failures will recover, e.g. flaky network connections, in which case, some other * mechanism may be more appropriate. 
*/ private final Consumer<Exception> fatalExceptionCons; /** * This method specifies how to persist buffered request entries into the destination. It is * implemented when support for a new destination is added. * * <p>The method is invoked with a set of request entries according to the buffering hints (and * the valid limits of the destination). The logic then needs to create and execute the request * asynchronously against the destination (ideally by batching together multiple request entries * to increase efficiency). The logic also needs to identify individual request entries that * were not persisted successfully and resubmit them using the {@code requestResult} callback. * * <p>From a threading perspective, the mailbox thread will call this method and initiate the * asynchronous request to persist the {@code requestEntries}. NOTE: The client must support * asynchronous requests and the method called to persist the records must asynchronously * execute and return a future with the results of that request. A thread from the destination * client thread pool should complete the request and submit the failed entries that should be * retried. The {@code requestResult} will then trigger the mailbox thread to requeue the * unsuccessful elements. * * <p>An example implementation of this method is included: * * <pre>{@code * @Override * protected void submitRequestEntries * (List<RequestEntryT> records, Consumer<Collection<RequestEntryT>> requestResult) { * Future<Response> response = destinationClient.putRecords(records); * response.whenComplete( * (response, error) -> { * if(error){ * List<RequestEntryT> retryableFailedRecords = getRetryableFailed(response); * requestResult.accept(retryableFailedRecords); * }else{ * requestResult.accept(Collections.emptyList()); * } * } * ); * } * * }</pre> * * <p>During checkpointing, the sink needs to ensure that there are no outstanding in-flight * requests. * * @param requestEntries a set of request entries that should be sent to the destination * @param requestResult the {@code accept} method should be called on this Consumer once the * processing of the {@code requestEntries} are complete. Any entries that encountered * difficulties in persisting should be re-queued through {@code requestResult} by including * that element in the collection of {@code RequestEntryT}s passed to the {@code accept} * method. All other elements are assumed to have been successfully persisted. */ protected abstract void submitRequestEntries( List<RequestEntryT> requestEntries, Consumer<List<RequestEntryT>> requestResult); /** * This method allows the getting of the size of a {@code RequestEntryT} in bytes. The size in * this case is measured as the total bytes that is written to the destination as a result of * persisting this particular {@code RequestEntryT} rather than the serialized length (which may * be the same). 
* * @param requestEntry the requestEntry for which we want to know the size * * @return the size of the requestEntry, as defined previously */ protected abstract long getSizeInBytes(RequestEntryT requestEntry); @Deprecated public AsyncSinkWriter( ElementConverter<InputT, RequestEntryT> elementConverter, Sink.InitContext context, int maxBatchSize, int maxInFlightRequests, int maxBufferedRequests, long maxBatchSizeInBytes, long maxTimeInBufferMS, long maxRecordSizeInBytes) { this( elementConverter, context, maxBatchSize, maxInFlightRequests, maxBufferedRequests, maxBatchSizeInBytes, maxTimeInBufferMS, maxRecordSizeInBytes, Collections.emptyList()); } @Deprecated public AsyncSinkWriter( ElementConverter<InputT, RequestEntryT> elementConverter, Sink.InitContext context, int maxBatchSize, int maxInFlightRequests, int maxBufferedRequests, long maxBatchSizeInBytes, long maxTimeInBufferMS, long maxRecordSizeInBytes, Collection<BufferedRequestState<RequestEntryT>> states) { this( elementConverter, context, maxBatchSize, maxBufferedRequests, maxBatchSizeInBytes, maxTimeInBufferMS, maxRecordSizeInBytes, states, CongestionControlRateLimitingStrategy.builder() .setMaxInFlightRequests(maxInFlightRequests) .setInitialMaxInFlightMessages(maxBatchSize) .setAimdScalingStrategy( AIMDScalingStrategy.builder() .setRateThreshold(maxBatchSize * maxInFlightRequests) .build()) .build()); } public AsyncSinkWriter( ElementConverter<InputT, RequestEntryT> elementConverter, Sink.InitContext context, int maxBatchSize, int maxBufferedRequests, long maxBatchSizeInBytes, long maxTimeInBufferMS, long maxRecordSizeInBytes, Collection<BufferedRequestState<RequestEntryT>> states, RateLimitingStrategy rateLimitingStrategy) { this.elementConverter = elementConverter; this.mailboxExecutor = context.getMailboxExecutor(); this.timeService = context.getProcessingTimeService(); Preconditions.checkNotNull(elementConverter); Preconditions.checkArgument(maxBatchSize > 0); Preconditions.checkArgument(maxBufferedRequests > 0); Preconditions.checkArgument(maxBatchSizeInBytes > 0); Preconditions.checkArgument(maxTimeInBufferMS > 0); Preconditions.checkArgument(maxRecordSizeInBytes > 0); Preconditions.checkArgument( maxBufferedRequests > maxBatchSize, "The maximum number of requests that may be buffered should be strictly" + " greater than the maximum number of requests per batch."); Preconditions.checkArgument( maxBatchSizeInBytes >= maxRecordSizeInBytes, "The maximum allowed size in bytes per flush must be greater than or equal to the" + " maximum allowed size in bytes of a single record."); Preconditions.checkNotNull(rateLimitingStrategy); this.maxBatchSize = maxBatchSize; this.maxBufferedRequests = maxBufferedRequests; this.maxBatchSizeInBytes = maxBatchSizeInBytes; this.maxTimeInBufferMS = maxTimeInBufferMS; this.maxRecordSizeInBytes = maxRecordSizeInBytes; this.rateLimitingStrategy = rateLimitingStrategy; this.inFlightRequestsCount = 0; this.bufferedRequestEntriesTotalSizeInBytes = 0; this.metrics = context.metricGroup(); this.metrics.setCurrentSendTimeGauge(() -> this.ackTime - this.lastSendTimestamp); this.numBytesSendCounter = this.metrics.getNumBytesSendCounter(); this.numRecordsSendCounter = this.metrics.getNumRecordsSendCounter(); this.fatalExceptionCons = exception -> mailboxExecutor.execute( () -> { throw exception; }, "A fatal exception occurred in the sink that cannot be recovered from or should not be retried."); initializeState(states); } private void registerCallback() { ProcessingTimeService.ProcessingTimeCallback ptc = 
instant -> { existsActiveTimerCallback = false; while (!bufferedRequestEntries.isEmpty()) { flush(); } }; timeService.registerTimer(timeService.getCurrentProcessingTime() + maxTimeInBufferMS, ptc); existsActiveTimerCallback = true; } @Override public void write(InputT element, Context context) throws IOException, InterruptedException { while (bufferedRequestEntries.size() >= maxBufferedRequests) { flush(); } addEntryToBuffer(elementConverter.apply(element, context), false); nonBlockingFlush(); } /** * Determines if a call to flush will be non-blocking (i.e. {@code inFlightRequestsCount} is * strictly smaller than {@code maxInFlightRequests}). Also requires one of the following * requirements to be met: * * <ul> * <li>The number of elements buffered is greater than or equal to the {@code maxBatchSize} * <li>The sum of the size in bytes of all records in the buffer is greater than or equal to * {@code maxBatchSizeInBytes} * </ul> */ private void nonBlockingFlush() throws InterruptedException { while (!rateLimitingStrategy.shouldBlock(createRequestInfo()) && (bufferedRequestEntries.size() >= getNextBatchSizeLimit() || bufferedRequestEntriesTotalSizeInBytes >= maxBatchSizeInBytes)) { flush(); } } private RequestInfo createRequestInfo() { int batchSize = getNextBatchSize(); return RequestInfo.builder().setBatchSize(batchSize).build(); } /** * Persists buffered RequestsEntries into the destination by invoking {@code * submitRequestEntries} with batches according to the user specified buffering hints. * * <p>The method checks with the {@code rateLimitingStrategy} to see if it should block the * request. */ private int getNextBatchSize() { return Math.min(rateLimitingStrategy.getMaxBatchSize(), bufferedRequestEntries.size()); } /** * Creates the next batch of request entries while respecting the {@code maxBatchSize} and * {@code maxBatchSizeInBytes}. Also adds these to the metrics counters. */ private List<RequestEntryT> createNextAvailableBatch(RequestInfo requestInfo) { List<RequestEntryT> batch = new ArrayList<>(requestInfo.batchSize); long batchSizeBytes = 0; for (int i = 0; i < requestInfo.batchSize; i++) { long requestEntrySize = bufferedRequestEntries.peek().getSize(); if (batchSizeBytes + requestEntrySize > maxBatchSizeInBytes) { break; } RequestEntryWrapper<RequestEntryT> elem = bufferedRequestEntries.remove(); batch.add(elem.getRequestEntry()); bufferedRequestEntriesTotalSizeInBytes -= requestEntrySize; batchSizeBytes += requestEntrySize; } numRecordsSendCounter.inc(batch.size()); numBytesSendCounter.inc(batchSizeBytes); return batch; } /** * Marks an in-flight request as completed and prepends failed requestEntries back to the * internal requestEntry buffer for later retry. 
* * @param failedRequestEntries requestEntries that need to be retried */ private void completeRequest( List<RequestEntryT> failedRequestEntries, int batchSize, long requestStartTime) throws InterruptedException { lastSendTimestamp = requestStartTime; ackTime = System.currentTimeMillis(); inFlightRequestsCount--; rateLimitingStrategy.registerCompletedRequest( RequestInfo.builder() .setFailedMessages(failedRequestEntries.size()) .setRequestStartTime(requestStartTime) .setBatchSize(batchSize) .build()); ListIterator<RequestEntryT> iterator = failedRequestEntries.listIterator(failedRequestEntries.size()); while (iterator.hasPrevious()) { addEntryToBuffer(iterator.previous(), true); } nonBlockingFlush(); } private void addEntryToBuffer(RequestEntryT entry, boolean insertAtHead) { if (bufferedRequestEntries.isEmpty() && !existsActiveTimerCallback) { registerCallback(); } RequestEntryWrapper<RequestEntryT> wrappedEntry = new RequestEntryWrapper<>(entry, getSizeInBytes(entry)); if (wrappedEntry.getSize() > maxRecordSizeInBytes) { throw new IllegalArgumentException( String.format( "The request entry sent to the buffer was of size [%s], when the maxRecordSizeInBytes was set to [%s].", wrappedEntry.getSize(), maxRecordSizeInBytes)); } if (insertAtHead) { bufferedRequestEntries.addFirst(wrappedEntry); } else { bufferedRequestEntries.add(wrappedEntry); } bufferedRequestEntriesTotalSizeInBytes += wrappedEntry.getSize(); } /** * In flight requests will be retried if the sink is still healthy. But if in-flight requests * fail after a checkpoint has been triggered and Flink needs to recover from the checkpoint, * the (failed) in-flight requests are gone and cannot be retried. Hence, there cannot be any * outstanding in-flight requests when a commit is initialized. * * <p>To this end, all in-flight requests need to completed before proceeding with the commit. */ @Override public void flush(boolean flush) throws InterruptedException { while (inFlightRequestsCount > 0 || (bufferedRequestEntries.size() > 0 && flush)) { yieldIfThereExistsInFlightRequests(); if (flush) { flush(); } } } private void yieldIfThereExistsInFlightRequests() throws InterruptedException { if (inFlightRequestsCount > 0) { mailboxExecutor.yield(); } } /** * All in-flight requests that are relevant for the snapshot have been completed, but there may * still be request entries in the internal buffers that are yet to be sent to the endpoint. * These request entries are stored in the snapshot state so that they don't get lost in case of * a failure/restart of the application. 
*/ @Override public List<BufferedRequestState<RequestEntryT>> snapshotState(long checkpointId) { return Collections.singletonList(new BufferedRequestState<>((bufferedRequestEntries))); } private void initializeState(Collection<BufferedRequestState<RequestEntryT>> states) { for (BufferedRequestState<RequestEntryT> state : states) { initializeState(state); } } private void initializeState(BufferedRequestState<RequestEntryT> state) { this.bufferedRequestEntries.addAll(state.getBufferedRequestEntries()); for (RequestEntryWrapper<RequestEntryT> wrapper : bufferedRequestEntries) { if (wrapper.getSize() > maxRecordSizeInBytes) { throw new IllegalStateException( String.format( "State contains record of size %d which exceeds sink maximum record size %d.", wrapper.getSize(), maxRecordSizeInBytes)); } } this.bufferedRequestEntriesTotalSizeInBytes += state.getStateSize(); } @Override public void close() { } private int getNextBatchSizeLimit() { return Math.min(maxBatchSize, rateLimitingStrategy.getMaxBatchSize()); } protected Consumer<Exception> getFatalExceptionCons() { return fatalExceptionCons; } }
class AsyncSinkWriter<InputT, RequestEntryT extends Serializable> implements StatefulSink.StatefulSinkWriter<InputT, BufferedRequestState<RequestEntryT>> { private final MailboxExecutor mailboxExecutor; private final ProcessingTimeService timeService; /* The timestamp of the previous batch of records was sent from this sink. */ private long lastSendTimestamp = 0; /* The timestamp of the response to the previous request from this sink. */ private long ackTime = Long.MAX_VALUE; /* The sink writer metric group. */ private final SinkWriterMetricGroup metrics; /* Counter for number of bytes this sink has attempted to send to the destination. */ private final Counter numBytesSendCounter; /* Counter for number of records this sink has attempted to send to the destination. */ private final Counter numRecordsSendCounter; private final RateLimitingStrategy rateLimitingStrategy; private final int maxBatchSize; private final int maxBufferedRequests; private final long maxBatchSizeInBytes; private final long maxTimeInBufferMS; private final long maxRecordSizeInBytes; /** * The ElementConverter provides a mapping between for the elements of a stream to request * entries that can be sent to the destination. * * <p>The resulting request entry is buffered by the AsyncSinkWriter and sent to the destination * when the {@code submitRequestEntries} method is invoked. */ private final ElementConverter<InputT, RequestEntryT> elementConverter; /** * Buffer to hold request entries that should be persisted into the destination, along with its * size in bytes. * * <p>A request entry contain all relevant details to make a call to the destination. Eg, for * Kinesis Data Streams a request entry contains the payload and partition key. * * <p>It seems more natural to buffer InputT, ie, the events that should be persisted, rather * than RequestEntryT. However, in practice, the response of a failed request call can make it * very hard, if not impossible, to reconstruct the original event. It is much easier, to just * construct a new (retry) request entry from the response and add that back to the queue for * later retry. */ private final Deque<RequestEntryWrapper<RequestEntryT>> bufferedRequestEntries = new ArrayDeque<>(); /** * Tracks all pending async calls that have been executed since the last checkpoint. Calls that * completed (successfully or unsuccessfully) are automatically decrementing the counter. Any * request entry that was not successfully persisted needs to be handled and retried by the * logic in {@code submitRequestsToApi}. * * <p>To complete a checkpoint, we need to make sure that no requests are in flight, as they may * fail, which could then lead to data loss. */ private int inFlightRequestsCount; /** * Tracks the cumulative size of all elements in {@code bufferedRequestEntries} to facilitate * the criterion for flushing after {@code maxBatchSizeInBytes} is reached. */ private double bufferedRequestEntriesTotalSizeInBytes; private boolean existsActiveTimerCallback = false; /** * The {@code accept} method should be called on this Consumer if the processing of the {@code * requestEntries} raises an exception that should not be retried. Specifically, any action that * we are sure will result in the same exception no matter how many times we retry should raise * a {@code RuntimeException} here. For example, wrong user credentials. However, it is possible * intermittent failures will recover, e.g. flaky network connections, in which case, some other * mechanism may be more appropriate. 
*/ private final Consumer<Exception> fatalExceptionCons; /** * This method specifies how to persist buffered request entries into the destination. It is * implemented when support for a new destination is added. * * <p>The method is invoked with a set of request entries according to the buffering hints (and * the valid limits of the destination). The logic then needs to create and execute the request * asynchronously against the destination (ideally by batching together multiple request entries * to increase efficiency). The logic also needs to identify individual request entries that * were not persisted successfully and resubmit them using the {@code requestToRetry} callback. * * <p>From a threading perspective, the mailbox thread will call this method and initiate the * asynchronous request to persist the {@code requestEntries}. NOTE: The client must support * asynchronous requests and the method called to persist the records must asynchronously * execute and return a future with the results of that request. A thread from the destination * client thread pool should complete the request and submit the failed entries that should be * retried. The {@code requestToRetry} will then trigger the mailbox thread to requeue the * unsuccessful elements. * * <p>An example implementation of this method is included: * * <pre>{@code * @Override * protected void submitRequestEntries * (List<RequestEntryT> records, Consumer<Collection<RequestEntryT>> requestToRetry) { * Future<Response> response = destinationClient.putRecords(records); * response.whenComplete( * (response, error) -> { * if(error){ * List<RequestEntryT> retryableFailedRecords = getRetryableFailed(response); * requestToRetry.accept(retryableFailedRecords); * }else{ * requestToRetry.accept(Collections.emptyList()); * } * } * ); * } * * }</pre> * * <p>During checkpointing, the sink needs to ensure that there are no outstanding in-flight * requests. * * @param requestEntries a set of request entries that should be sent to the destination * @param requestToRetry the {@code accept} method should be called on this Consumer once the * processing of the {@code requestEntries} are complete. Any entries that encountered * difficulties in persisting should be re-queued through {@code requestToRetry} by * including that element in the collection of {@code RequestEntryT}s passed to the {@code * accept} method. All other elements are assumed to have been successfully persisted. */ protected abstract void submitRequestEntries( List<RequestEntryT> requestEntries, Consumer<List<RequestEntryT>> requestToRetry); /** * This method allows the getting of the size of a {@code RequestEntryT} in bytes. The size in * this case is measured as the total bytes that is written to the destination as a result of * persisting this particular {@code RequestEntryT} rather than the serialized length (which may * be the same). * * @param requestEntry the requestEntry for which we want to know the size * @return the size of the requestEntry, as defined previously */ protected abstract long getSizeInBytes(RequestEntryT requestEntry); /** * This method is deprecated, please use the constructor that specifies the {@link * AsyncSinkWriterConfiguration}. 
*/ @Deprecated public AsyncSinkWriter( ElementConverter<InputT, RequestEntryT> elementConverter, Sink.InitContext context, int maxBatchSize, int maxInFlightRequests, int maxBufferedRequests, long maxBatchSizeInBytes, long maxTimeInBufferMS, long maxRecordSizeInBytes) { this( elementConverter, context, maxBatchSize, maxInFlightRequests, maxBufferedRequests, maxBatchSizeInBytes, maxTimeInBufferMS, maxRecordSizeInBytes, Collections.emptyList()); } /** * This method is deprecated, please use the constructor that specifies the {@link * AsyncSinkWriterConfiguration}. */ @Deprecated public AsyncSinkWriter( ElementConverter<InputT, RequestEntryT> elementConverter, Sink.InitContext context, int maxBatchSize, int maxInFlightRequests, int maxBufferedRequests, long maxBatchSizeInBytes, long maxTimeInBufferMS, long maxRecordSizeInBytes, Collection<BufferedRequestState<RequestEntryT>> states) { this( elementConverter, context, AsyncSinkWriterConfiguration.builder() .setMaxBatchSize(maxBatchSize) .setMaxBatchSizeInBytes(maxBatchSizeInBytes) .setMaxInFlightRequests(maxInFlightRequests) .setMaxBufferedRequests(maxBufferedRequests) .setMaxTimeInBufferMS(maxTimeInBufferMS) .setMaxRecordSizeInBytes(maxRecordSizeInBytes) .build(), states); } public AsyncSinkWriter( ElementConverter<InputT, RequestEntryT> elementConverter, Sink.InitContext context, AsyncSinkWriterConfiguration configuration, Collection<BufferedRequestState<RequestEntryT>> states) { this.elementConverter = elementConverter; this.mailboxExecutor = context.getMailboxExecutor(); this.timeService = context.getProcessingTimeService(); Preconditions.checkNotNull(elementConverter); Preconditions.checkArgument(configuration.getMaxBatchSize() > 0); Preconditions.checkArgument(configuration.getMaxBufferedRequests() > 0); Preconditions.checkArgument(configuration.getMaxBatchSizeInBytes() > 0); Preconditions.checkArgument(configuration.getMaxTimeInBufferMS() > 0); Preconditions.checkArgument(configuration.getMaxRecordSizeInBytes() > 0); Preconditions.checkArgument( configuration.getMaxBufferedRequests() > configuration.getMaxBatchSize(), "The maximum number of requests that may be buffered should be strictly" + " greater than the maximum number of requests per batch."); Preconditions.checkArgument( configuration.getMaxBatchSizeInBytes() >= configuration.getMaxRecordSizeInBytes(), "The maximum allowed size in bytes per flush must be greater than or equal to the" + " maximum allowed size in bytes of a single record."); Preconditions.checkNotNull(configuration.getRateLimitingStrategy()); this.maxBatchSize = configuration.getMaxBatchSize(); this.maxBufferedRequests = configuration.getMaxBufferedRequests(); this.maxBatchSizeInBytes = configuration.getMaxBatchSizeInBytes(); this.maxTimeInBufferMS = configuration.getMaxTimeInBufferMS(); this.maxRecordSizeInBytes = configuration.getMaxRecordSizeInBytes(); this.rateLimitingStrategy = configuration.getRateLimitingStrategy(); this.inFlightRequestsCount = 0; this.bufferedRequestEntriesTotalSizeInBytes = 0; this.metrics = context.metricGroup(); this.metrics.setCurrentSendTimeGauge(() -> this.ackTime - this.lastSendTimestamp); this.numBytesSendCounter = this.metrics.getNumBytesSendCounter(); this.numRecordsSendCounter = this.metrics.getNumRecordsSendCounter(); this.fatalExceptionCons = exception -> mailboxExecutor.execute( () -> { throw exception; }, "A fatal exception occurred in the sink that cannot be recovered from or should not be retried."); initializeState(states); } private void registerCallback() { 
ProcessingTimeService.ProcessingTimeCallback ptc = instant -> { existsActiveTimerCallback = false; while (!bufferedRequestEntries.isEmpty()) { flush(); } }; timeService.registerTimer(timeService.getCurrentProcessingTime() + maxTimeInBufferMS, ptc); existsActiveTimerCallback = true; } @Override public void write(InputT element, Context context) throws IOException, InterruptedException { while (bufferedRequestEntries.size() >= maxBufferedRequests) { flush(); } addEntryToBuffer(elementConverter.apply(element, context), false); nonBlockingFlush(); } /** * Determines if a call to flush will be non-blocking (i.e. {@code inFlightRequestsCount} is * strictly smaller than {@code maxInFlightRequests}). Also requires one of the following * requirements to be met: * * <ul> * <li>The number of elements buffered is greater than or equal to the {@code maxBatchSize} * <li>The sum of the size in bytes of all records in the buffer is greater than or equal to * {@code maxBatchSizeInBytes} * </ul> */ private void nonBlockingFlush() throws InterruptedException { while (!rateLimitingStrategy.shouldBlock(createRequestInfo()) && (bufferedRequestEntries.size() >= getNextBatchSizeLimit() || bufferedRequestEntriesTotalSizeInBytes >= maxBatchSizeInBytes)) { flush(); } } private BasicRequestInfo createRequestInfo() { int batchSize = getNextBatchSize(); return new BasicRequestInfo(batchSize); } /** * Persists buffered RequestsEntries into the destination by invoking {@code * submitRequestEntries} with batches according to the user specified buffering hints. * * <p>The method checks with the {@code rateLimitingStrategy} to see if it should block the * request. */ private int getNextBatchSize() { return Math.min(getNextBatchSizeLimit(), bufferedRequestEntries.size()); } /** * Creates the next batch of request entries while respecting the {@code maxBatchSize} and * {@code maxBatchSizeInBytes}. Also adds these to the metrics counters. */ private List<RequestEntryT> createNextAvailableBatch(RequestInfo requestInfo) { List<RequestEntryT> batch = new ArrayList<>(requestInfo.getBatchSize()); long batchSizeBytes = 0; for (int i = 0; i < requestInfo.getBatchSize(); i++) { long requestEntrySize = bufferedRequestEntries.peek().getSize(); if (batchSizeBytes + requestEntrySize > maxBatchSizeInBytes) { break; } RequestEntryWrapper<RequestEntryT> elem = bufferedRequestEntries.remove(); batch.add(elem.getRequestEntry()); bufferedRequestEntriesTotalSizeInBytes -= requestEntrySize; batchSizeBytes += requestEntrySize; } numRecordsSendCounter.inc(batch.size()); numBytesSendCounter.inc(batchSizeBytes); return batch; } /** * Marks an in-flight request as completed and prepends failed requestEntries back to the * internal requestEntry buffer for later retry. 
* * @param failedRequestEntries requestEntries that need to be retried */ private void completeRequest( List<RequestEntryT> failedRequestEntries, int batchSize, long requestStartTime) throws InterruptedException { lastSendTimestamp = requestStartTime; ackTime = System.currentTimeMillis(); inFlightRequestsCount--; rateLimitingStrategy.registerCompletedRequest( new BasicResultInfo(failedRequestEntries.size(), batchSize)); ListIterator<RequestEntryT> iterator = failedRequestEntries.listIterator(failedRequestEntries.size()); while (iterator.hasPrevious()) { addEntryToBuffer(iterator.previous(), true); } nonBlockingFlush(); } private void addEntryToBuffer(RequestEntryT entry, boolean insertAtHead) { if (bufferedRequestEntries.isEmpty() && !existsActiveTimerCallback) { registerCallback(); } RequestEntryWrapper<RequestEntryT> wrappedEntry = new RequestEntryWrapper<>(entry, getSizeInBytes(entry)); if (wrappedEntry.getSize() > maxRecordSizeInBytes) { throw new IllegalArgumentException( String.format( "The request entry sent to the buffer was of size [%s], when the maxRecordSizeInBytes was set to [%s].", wrappedEntry.getSize(), maxRecordSizeInBytes)); } if (insertAtHead) { bufferedRequestEntries.addFirst(wrappedEntry); } else { bufferedRequestEntries.add(wrappedEntry); } bufferedRequestEntriesTotalSizeInBytes += wrappedEntry.getSize(); } /** * In flight requests will be retried if the sink is still healthy. But if in-flight requests * fail after a checkpoint has been triggered and Flink needs to recover from the checkpoint, * the (failed) in-flight requests are gone and cannot be retried. Hence, there cannot be any * outstanding in-flight requests when a commit is initialized. * * <p>To this end, all in-flight requests need to completed before proceeding with the commit. */ @Override public void flush(boolean flush) throws InterruptedException { while (inFlightRequestsCount > 0 || (bufferedRequestEntries.size() > 0 && flush)) { yieldIfThereExistsInFlightRequests(); if (flush) { flush(); } } } private void yieldIfThereExistsInFlightRequests() throws InterruptedException { if (inFlightRequestsCount > 0) { mailboxExecutor.yield(); } } /** * All in-flight requests that are relevant for the snapshot have been completed, but there may * still be request entries in the internal buffers that are yet to be sent to the endpoint. * These request entries are stored in the snapshot state so that they don't get lost in case of * a failure/restart of the application. 
*/ @Override public List<BufferedRequestState<RequestEntryT>> snapshotState(long checkpointId) { return Collections.singletonList(new BufferedRequestState<>((bufferedRequestEntries))); } private void initializeState(Collection<BufferedRequestState<RequestEntryT>> states) { for (BufferedRequestState<RequestEntryT> state : states) { initializeState(state); } } private void initializeState(BufferedRequestState<RequestEntryT> state) { this.bufferedRequestEntries.addAll(state.getBufferedRequestEntries()); for (RequestEntryWrapper<RequestEntryT> wrapper : bufferedRequestEntries) { if (wrapper.getSize() > maxRecordSizeInBytes) { throw new IllegalStateException( String.format( "State contains record of size %d which exceeds sink maximum record size %d.", wrapper.getSize(), maxRecordSizeInBytes)); } } this.bufferedRequestEntriesTotalSizeInBytes += state.getStateSize(); } @Override public void close() {} private int getNextBatchSizeLimit() { return Math.min(maxBatchSize, rateLimitingStrategy.getMaxBatchSize()); } protected Consumer<Exception> getFatalExceptionCons() { return fatalExceptionCons; } }
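The `submitRequestEntries` contract documented above is easiest to see in a concrete subclass. Below is a minimal sketch, assuming a hypothetical asynchronous destination client (`DestinationClient` and `PutResponse` are invented here for illustration); the Flink import paths follow the `flink-connector-base` module, and the constructor mirrors the non-deprecated one shown above.

```java
import java.nio.charset.StandardCharsets;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.function.Consumer;

import org.apache.flink.api.connector.sink2.Sink;
import org.apache.flink.connector.base.sink.writer.AsyncSinkWriter;
import org.apache.flink.connector.base.sink.writer.ElementConverter;
import org.apache.flink.connector.base.sink.writer.config.AsyncSinkWriterConfiguration;

class ExampleSinkWriter extends AsyncSinkWriter<String, String> {

    /** Hypothetical async client types, stand-ins for a real destination SDK. */
    interface PutResponse { List<String> failedEntries(); }
    interface DestinationClient { CompletableFuture<PutResponse> putRecords(List<String> records); }

    private final DestinationClient destinationClient;

    ExampleSinkWriter(
            ElementConverter<String, String> elementConverter,
            Sink.InitContext context,
            AsyncSinkWriterConfiguration configuration,
            DestinationClient destinationClient) {
        super(elementConverter, context, configuration, Collections.emptyList());
        this.destinationClient = destinationClient;
    }

    @Override
    protected void submitRequestEntries(
            List<String> requestEntries, Consumer<List<String>> requestToRetry) {
        destinationClient
                .putRecords(requestEntries)
                .whenComplete(
                        (response, error) -> {
                            if (error != null) {
                                // Treat a failed request as fully retryable here; a real
                                // implementation would route non-retryable errors to
                                // getFatalExceptionCons() instead.
                                requestToRetry.accept(requestEntries);
                            } else {
                                // Re-queue only the entries the destination rejected;
                                // an empty list marks the request as fully complete.
                                requestToRetry.accept(response.failedEntries());
                            }
                        });
    }

    @Override
    protected long getSizeInBytes(String requestEntry) {
        // Size as it will be written to the destination; UTF-8 length is a
        // reasonable proxy for plain string records.
        return requestEntry.getBytes(StandardCharsets.UTF_8).length;
    }
}
```

As the javadoc above notes, the mailbox thread invokes `submitRequestEntries`, the future completes on the client's thread pool, and `requestToRetry` hands failed entries back to the mailbox thread for re-queuing.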
You are right. `SqlToOperationConverter:557` is the root cause. I find the test case does not cover the full change path (only part of the commit): for example, if I remove the line in `TableEnvironmentImpl#1052`, ``` final CatalogFactory factory = TableFactoryService.find( CatalogFactory.class, properties); ``` the test case still runs successfully. Do we need to build `StreamExecutionEnvironmentImpl` with the user classloader directly to verify the change?
public void testCreateCatalogFromUserClassLoader() throws Exception { final String className = "UserCatalogFactory"; URLClassLoader classLoader = ClassLoaderUtils.withRoot(temporaryFolder.newFolder()) .addResource("META-INF/services/org.apache.flink.table.factories.TableFactory", "UserCatalogFactory") .addClass( className, "import org.apache.flink.table.catalog.GenericInMemoryCatalog;\n" + "import org.apache.flink.table.factories.CatalogFactory;\n" + "import java.util.Collections;\n" + "import org.apache.flink.table.catalog.Catalog;\n" + "import java.util.HashMap;\n" + "import java.util.List;\n" + "import java.util.Map;\n" + "\tpublic class UserCatalogFactory implements CatalogFactory {\n" + "\t\t@Override\n" + "\t\tpublic Catalog createCatalog(\n" + "\t\t\t\tString name,\n" + "\t\t\t\tMap<String, String> properties) {\n" + "\t\t\treturn new GenericInMemoryCatalog(name);\n" + "\t\t}\n" + "\n" + "\t\t@Override\n" + "\t\tpublic Map<String, String> requiredContext() {\n" + "\t\t\tHashMap<String, String> hashMap = new HashMap<>();\n" + "\t\t\thashMap.put(\"type\", \"userCatalog\");\n" + "\t\t\treturn hashMap;\n" + "\t\t}\n" + "\n" + "\t\t@Override\n" + "\t\tpublic List<String> supportedProperties() {\n" + "\t\t\treturn Collections.emptyList();\n" + "\t\t}\n" + "\t}" ).build(); try (TemporaryClassLoaderContext context = TemporaryClassLoaderContext.of(classLoader)) { TableEnvironment tableEnvironment = getTableEnvironment(); tableEnvironment.executeSql("CREATE CATALOG cat WITH ('type'='userCatalog')"); assertTrue(tableEnvironment.getCatalog("cat").isPresent()); } }
try (TemporaryClassLoaderContext context = TemporaryClassLoaderContext.of(classLoader)) {
public void testCreateCatalogFromUserClassLoader() throws Exception { final String className = "UserCatalogFactory"; URLClassLoader classLoader = ClassLoaderUtils.withRoot(temporaryFolder.newFolder()) .addResource("META-INF/services/org.apache.flink.table.factories.TableFactory", "UserCatalogFactory") .addClass( className, "import org.apache.flink.table.catalog.GenericInMemoryCatalog;\n" + "import org.apache.flink.table.factories.CatalogFactory;\n" + "import java.util.Collections;\n" + "import org.apache.flink.table.catalog.Catalog;\n" + "import java.util.HashMap;\n" + "import java.util.List;\n" + "import java.util.Map;\n" + "\tpublic class UserCatalogFactory implements CatalogFactory {\n" + "\t\t@Override\n" + "\t\tpublic Catalog createCatalog(\n" + "\t\t\t\tString name,\n" + "\t\t\t\tMap<String, String> properties) {\n" + "\t\t\treturn new GenericInMemoryCatalog(name);\n" + "\t\t}\n" + "\n" + "\t\t@Override\n" + "\t\tpublic Map<String, String> requiredContext() {\n" + "\t\t\tHashMap<String, String> hashMap = new HashMap<>();\n" + "\t\t\thashMap.put(\"type\", \"userCatalog\");\n" + "\t\t\treturn hashMap;\n" + "\t\t}\n" + "\n" + "\t\t@Override\n" + "\t\tpublic List<String> supportedProperties() {\n" + "\t\t\treturn Collections.emptyList();\n" + "\t\t}\n" + "\t}" ).build(); try (TemporaryClassLoaderContext context = TemporaryClassLoaderContext.of(classLoader)) { TableEnvironment tableEnvironment = getTableEnvironment(); tableEnvironment.executeSql("CREATE CATALOG cat WITH ('type'='userCatalog')"); assertTrue(tableEnvironment.getCatalog("cat").isPresent()); } }
class CatalogITCase { @Rule public TemporaryFolder temporaryFolder = new TemporaryFolder(); @Test public void testCreateCatalog() { String name = "c1"; TableEnvironment tableEnv = getTableEnvironment(); String ddl = String.format("create catalog %s with('type'='%s')", name, CATALOG_TYPE_VALUE_GENERIC_IN_MEMORY); tableEnv.executeSql(ddl); assertTrue(tableEnv.getCatalog(name).isPresent()); assertTrue(tableEnv.getCatalog(name).get() instanceof GenericInMemoryCatalog); } @Test public void testDropCatalog() { String name = "c1"; TableEnvironment tableEnv = getTableEnvironment(); String ddl = String.format("create catalog %s with('type'='%s')", name, CATALOG_TYPE_VALUE_GENERIC_IN_MEMORY); tableEnv.executeSql(ddl); assertTrue(tableEnv.getCatalog(name).isPresent()); ddl = String.format("drop catalog %s", name); tableEnv.executeSql(ddl); assertFalse(tableEnv.getCatalog(name).isPresent()); } private TableEnvironment getTableEnvironment() { EnvironmentSettings settings = EnvironmentSettings.newInstance().inStreamingMode().build(); StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(); return StreamTableEnvironment.create(env, settings); } }
class CatalogITCase { @Rule public TemporaryFolder temporaryFolder = new TemporaryFolder(); @Test public void testCreateCatalog() { String name = "c1"; TableEnvironment tableEnv = getTableEnvironment(); String ddl = String.format("create catalog %s with('type'='%s')", name, CATALOG_TYPE_VALUE_GENERIC_IN_MEMORY); tableEnv.executeSql(ddl); assertTrue(tableEnv.getCatalog(name).isPresent()); assertTrue(tableEnv.getCatalog(name).get() instanceof GenericInMemoryCatalog); } @Test public void testDropCatalog() { String name = "c1"; TableEnvironment tableEnv = getTableEnvironment(); String ddl = String.format("create catalog %s with('type'='%s')", name, CATALOG_TYPE_VALUE_GENERIC_IN_MEMORY); tableEnv.executeSql(ddl); assertTrue(tableEnv.getCatalog(name).isPresent()); ddl = String.format("drop catalog %s", name); tableEnv.executeSql(ddl); assertFalse(tableEnv.getCatalog(name).isPresent()); } private TableEnvironment getTableEnvironment() { EnvironmentSettings settings = EnvironmentSettings.newInstance().inStreamingMode().build(); StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(); return StreamTableEnvironment.create(env, settings); } }
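Regarding the question in the comment above: one way to make the test fail without the fix is to leave the `TemporaryClassLoaderContext` before executing the DDL, so the factory can no longer be found through the thread context classloader at execution time. A hypothetical stricter variant, reusing the `classLoader` built in `testCreateCatalogFromUserClassLoader`:

```java
// The user classloader is current only while the TableEnvironment is created.
// If CREATE CATALOG still succeeds afterwards, the environment must have
// captured the user classloader itself instead of relying on
// Thread.currentThread().getContextClassLoader() at executeSql time.
TableEnvironment tableEnvironment;
try (TemporaryClassLoaderContext context = TemporaryClassLoaderContext.of(classLoader)) {
    tableEnvironment = getTableEnvironment();
}
tableEnvironment.executeSql("CREATE CATALOG cat WITH ('type'='userCatalog')");
assertTrue(tableEnvironment.getCatalog("cat").isPresent());
```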
```suggestion throw new AnalysisException("Float or double cannot be used as a sort key, use decimal instead."); ```
private void analyzeOrderByClause() throws AnalysisException { if (selectStmt.getOrderByElements() == null) { /** * The keys type of Materialized view is aggregation. * All of group by columns are keys of materialized view. */ if (mvKeysType == KeysType.AGG_KEYS) { for (MVColumnItem mvColumnItem : mvColumnItemList) { if (mvColumnItem.getAggregationType() != null) { break; } if (mvColumnItem.getType().isFloatingPointType()) { throw new AnalysisException("Float or double can not used as a key, use decimal instead."); } mvColumnItem.setIsKey(true); } return; } /** * There is no aggregation function in materialized view. * Supplement key of MV columns * For example: select k1, k2 ... kn from t1 * The default key columns are first 36 bytes of the columns in define order. * If the number of columns in the first 36 is less than 3, the first 3 columns will be used. * column: k1, k2, k3... km. The key is true. * Supplement non-key of MV columns * column: km... kn. The key is false, aggregation type is none, isAggregationTypeImplicit is true. */ int keyStorageLayoutBytes = 0; for (int i = 0; i < selectStmt.getResultExprs().size(); i++) { MVColumnItem mvColumnItem = mvColumnItemList.get(i); Expr resultColumn = selectStmt.getResultExprs().get(i); keyStorageLayoutBytes += resultColumn.getType().getStorageLayoutBytes(); if ((!mvColumnItem.getType().isFloatingPointType()) && ((i + 1) <= FeConstants.shortkey_max_column_count || keyStorageLayoutBytes < FeConstants.shortkey_maxsize_bytes)) { mvColumnItem.setIsKey(true); } else { if (i == 0) { throw new AnalysisException("The first column could not be float or double, " + "use decimal instead."); } mvColumnItem.setAggregationType(AggregateType.NONE, true); } } return; } List<OrderByElement> orderByElements = selectStmt.getOrderByElements(); if (orderByElements.size() > mvColumnItemList.size()) { throw new AnalysisException("The number of columns in order clause must be less then " + "the number of columns in select clause"); } if (beginIndexOfAggregation != -1 && (orderByElements.size() != (beginIndexOfAggregation))) { throw new AnalysisException("The key of columns in mv must be all of group by columns"); } for (int i = 0; i < orderByElements.size(); i++) { Expr orderByElement = orderByElements.get(i).getExpr(); if (!(orderByElement instanceof SlotRef)) { throw new AnalysisException("The column in order clause must be original column without calculation. " + "Error column: " + orderByElement.toSql()); } MVColumnItem mvColumnItem = mvColumnItemList.get(i); SlotRef slotRef = (SlotRef) orderByElement; if (!mvColumnItem.getName().equalsIgnoreCase(slotRef.getColumnName())) { throw new AnalysisException("The order of columns in order by clause must be same as " + "the order of columns in select list"); } Preconditions.checkState(mvColumnItem.getAggregationType() == null); mvColumnItem.setIsKey(true); } for (MVColumnItem mvColumnItem : mvColumnItemList) { if (mvColumnItem.isKey()) { continue; } if (mvColumnItem.getAggregationType() != null) { break; } mvColumnItem.setAggregationType(AggregateType.NONE, true); } }
throw new AnalysisException("Float or double can not used as a key, use decimal instead.");
private void analyzeOrderByClause() throws AnalysisException { if (selectStmt.getOrderByElements() == null) { supplyOrderColumn(); return; } List<OrderByElement> orderByElements = selectStmt.getOrderByElements(); if (orderByElements.size() > mvColumnItemList.size()) { throw new AnalysisException("The number of columns in order clause must be less then " + "the number of " + "columns in select clause"); } if (beginIndexOfAggregation != -1 && (orderByElements.size() != (beginIndexOfAggregation))) { throw new AnalysisException("The key of columns in mv must be all of group by columns"); } for (int i = 0; i < orderByElements.size(); i++) { Expr orderByElement = orderByElements.get(i).getExpr(); if (!(orderByElement instanceof SlotRef)) { throw new AnalysisException("The column in order clause must be original column without calculation. " + "Error column: " + orderByElement.toSql()); } MVColumnItem mvColumnItem = mvColumnItemList.get(i); SlotRef slotRef = (SlotRef) orderByElement; if (!mvColumnItem.getName().equalsIgnoreCase(slotRef.getColumnName())) { throw new AnalysisException("The order of columns in order by clause must be same as " + "the order of columns in select list"); } Preconditions.checkState(mvColumnItem.getAggregationType() == null); mvColumnItem.setIsKey(true); } for (MVColumnItem mvColumnItem : mvColumnItemList) { if (mvColumnItem.isKey()) { continue; } if (mvColumnItem.getAggregationType() != null) { break; } mvColumnItem.setAggregationType(AggregateType.NONE, true); } }
class CreateMaterializedViewStmt extends DdlStmt { public static final String MATERIALIZED_VIEW_NAME_PRFIX = "__doris_materialized_view_"; private String mvName; private SelectStmt selectStmt; private Map<String, String> properties; private int beginIndexOfAggregation = -1; /** * origin stmt: select k1, k2, v1, sum(v2) from base_table group by k1, k2, v1 * mvColumnItemList: [k1: {name: k1, isKey: true, aggType: null, isAggregationTypeImplicit: false}, * k2: {name: k2, isKey: true, aggType: null, isAggregationTypeImplicit: false}, * v1: {name: v1, isKey: true, aggType: null, isAggregationTypeImplicit: false}, * v2: {name: v2, isKey: false, aggType: sum, isAggregationTypeImplicit: false}] * This order of mvColumnItemList is meaningful. */ private List<MVColumnItem> mvColumnItemList = Lists.newArrayList(); private String baseIndexName; private String dbName; private KeysType mvKeysType = KeysType.DUP_KEYS; public CreateMaterializedViewStmt(String mvName, SelectStmt selectStmt, Map<String, String> properties) { this.mvName = mvName; this.selectStmt = selectStmt; this.properties = properties; } public String getMVName() { return mvName; } public List<MVColumnItem> getMVColumnItemList() { return mvColumnItemList; } public String getBaseIndexName() { return baseIndexName; } public Map<String, String> getProperties() { return properties; } public String getDBName() { return dbName; } public KeysType getMVKeysType() { return mvKeysType; } @Override public void analyze(Analyzer analyzer) throws UserException { if (!Config.enable_materialized_view) { throw new AnalysisException("The materialized view is disabled"); } super.analyze(analyzer); FeNameFormat.checkTableName(mvName); selectStmt.analyze(analyzer); if (selectStmt.getAggInfo() != null) { mvKeysType = KeysType.AGG_KEYS; } analyzeSelectClause(); analyzeFromClause(); if (selectStmt.getWhereClause() != null) { throw new AnalysisException("The where clause is not supported in add materialized view clause, expr:" + selectStmt.getWhereClause().toSql()); } if (selectStmt.getHavingPred() != null) { throw new AnalysisException("The having clause is not supported in add materialized view clause, expr:" + selectStmt.getHavingPred().toSql()); } analyzeOrderByClause(); if (selectStmt.getLimit() != -1) { throw new AnalysisException("The limit clause is not supported in add materialized view clause, expr:" + " limit " + selectStmt.getLimit()); } } public void analyzeSelectClause() throws AnalysisException { SelectList selectList = selectStmt.getSelectList(); if (selectList.getItems().isEmpty()) { throw new AnalysisException("The materialized view must contain at least one column"); } boolean meetAggregate = false; Set<String> mvColumnNameSet = Sets.newHashSet(); /** * 1. The columns of mv must be a single column or a aggregate column without any calculate. * Also the children of aggregate column must be a single column without any calculate. * For example: * a, sum(b) is legal. * a+b, sum(a+b) is illegal. * 2. The SUM, MIN, MAX function is supported. The other function will be supported in the future. * 3. The aggregate column must be declared after the single column. */ for (int i = 0; i < selectList.getItems().size(); i++) { SelectListItem selectListItem = selectList.getItems().get(i); Expr selectListItemExpr = selectListItem.getExpr(); if (!(selectListItemExpr instanceof SlotRef) && !(selectListItemExpr instanceof FunctionCallExpr)) { throw new AnalysisException("The materialized view only support the single column or function expr. 
" + "Error column: " + selectListItemExpr.toSql()); } if (selectListItem.getExpr() instanceof SlotRef) { if (meetAggregate) { throw new AnalysisException("The aggregate column should be after the single column"); } SlotRef slotRef = (SlotRef) selectListItem.getExpr(); String columnName = slotRef.getColumnName().toLowerCase(); if (!mvColumnNameSet.add(columnName)) { ErrorReport.reportAnalysisException(ErrorCode.ERR_DUP_FIELDNAME, columnName); } MVColumnItem mvColumnItem = new MVColumnItem(columnName); mvColumnItem.setType(slotRef.getType().getPrimitiveType()); mvColumnItemList.add(mvColumnItem); } else if (selectListItem.getExpr() instanceof FunctionCallExpr) { FunctionCallExpr functionCallExpr = (FunctionCallExpr) selectListItem.getExpr(); String functionName = functionCallExpr.getFnName().getFunction(); Expr defineExpr = null; if (!functionName.equalsIgnoreCase("sum") && !functionName.equalsIgnoreCase("min") && !functionName.equalsIgnoreCase("max")) { throw new AnalysisException("The materialized view only support the sum, min and max aggregate " + "function. Error function: " + functionCallExpr.toSqlImpl()); } Preconditions.checkState(functionCallExpr.getChildren().size() == 1); Expr functionChild0 = functionCallExpr.getChild(0); SlotRef slotRef; if (functionChild0 instanceof SlotRef) { slotRef = (SlotRef) functionChild0; } else if (functionChild0 instanceof CastExpr && (functionChild0.getChild(0) instanceof SlotRef)) { slotRef = (SlotRef) functionChild0.getChild(0); } else { throw new AnalysisException("The children of aggregate function only support one original column. " + "Error function: " + functionCallExpr.toSqlImpl()); } meetAggregate = true; String columnName = slotRef.getColumnName().toLowerCase(); if (!mvColumnNameSet.add(columnName)) { ErrorReport.reportAnalysisException(ErrorCode.ERR_DUP_FIELDNAME, columnName); } if (beginIndexOfAggregation == -1) { beginIndexOfAggregation = i; } MVColumnItem mvColumnItem = new MVColumnItem(columnName); mvColumnItem.setAggregationType(AggregateType.valueOf(functionName.toUpperCase()), false); mvColumnItem.setDefineExpr(defineExpr); mvColumnItemList.add(mvColumnItem); } } if (beginIndexOfAggregation == 0) { throw new AnalysisException("The materialized view must contain at least one key column"); } } private void analyzeFromClause() throws AnalysisException { List<TableRef> tableRefList = selectStmt.getTableRefs(); if (tableRefList.size() != 1) { throw new AnalysisException("The materialized view only support one table in from clause."); } TableName tableName = tableRefList.get(0).getName(); baseIndexName = tableName.getTbl(); dbName = tableName.getDb(); } @Override public String toSql() { return null; } }
class CreateMaterializedViewStmt extends DdlStmt { public static final String MATERIALIZED_VIEW_NAME_PRFIX = "__doris_materialized_view_"; private String mvName; private SelectStmt selectStmt; private Map<String, String> properties; private int beginIndexOfAggregation = -1; /** * origin stmt: select k1, k2, v1, sum(v2) from base_table group by k1, k2, v1 * mvColumnItemList: [k1: {name: k1, isKey: true, aggType: null, isAggregationTypeImplicit: false}, * k2: {name: k2, isKey: true, aggType: null, isAggregationTypeImplicit: false}, * v1: {name: v1, isKey: true, aggType: null, isAggregationTypeImplicit: false}, * v2: {name: v2, isKey: false, aggType: sum, isAggregationTypeImplicit: false}] * This order of mvColumnItemList is meaningful. */ private List<MVColumnItem> mvColumnItemList = Lists.newArrayList(); private String baseIndexName; private String dbName; private KeysType mvKeysType = KeysType.DUP_KEYS; public CreateMaterializedViewStmt(String mvName, SelectStmt selectStmt, Map<String, String> properties) { this.mvName = mvName; this.selectStmt = selectStmt; this.properties = properties; } public String getMVName() { return mvName; } public List<MVColumnItem> getMVColumnItemList() { return mvColumnItemList; } public String getBaseIndexName() { return baseIndexName; } public Map<String, String> getProperties() { return properties; } public String getDBName() { return dbName; } public KeysType getMVKeysType() { return mvKeysType; } @Override public void analyze(Analyzer analyzer) throws UserException { if (!Config.enable_materialized_view) { throw new AnalysisException("The materialized view is disabled"); } super.analyze(analyzer); FeNameFormat.checkTableName(mvName); selectStmt.analyze(analyzer); if (selectStmt.getAggInfo() != null) { mvKeysType = KeysType.AGG_KEYS; } analyzeSelectClause(); analyzeFromClause(); if (selectStmt.getWhereClause() != null) { throw new AnalysisException("The where clause is not supported in add materialized view clause, expr:" + selectStmt.getWhereClause().toSql()); } if (selectStmt.getHavingPred() != null) { throw new AnalysisException("The having clause is not supported in add materialized view clause, expr:" + selectStmt.getHavingPred().toSql()); } analyzeOrderByClause(); if (selectStmt.getLimit() != -1) { throw new AnalysisException("The limit clause is not supported in add materialized view clause, expr:" + " limit " + selectStmt.getLimit()); } } public void analyzeSelectClause() throws AnalysisException { SelectList selectList = selectStmt.getSelectList(); if (selectList.getItems().isEmpty()) { throw new AnalysisException("The materialized view must contain at least one column"); } boolean meetAggregate = false; Set<String> mvColumnNameSet = Sets.newHashSet(); /** * 1. The columns of mv must be a single column or a aggregate column without any calculate. * Also the children of aggregate column must be a single column without any calculate. * For example: * a, sum(b) is legal. * a+b, sum(a+b) is illegal. * 2. The SUM, MIN, MAX function is supported. The other function will be supported in the future. * 3. The aggregate column must be declared after the single column. */ for (int i = 0; i < selectList.getItems().size(); i++) { SelectListItem selectListItem = selectList.getItems().get(i); Expr selectListItemExpr = selectListItem.getExpr(); if (!(selectListItemExpr instanceof SlotRef) && !(selectListItemExpr instanceof FunctionCallExpr)) { throw new AnalysisException("The materialized view only support the single column or function expr. 
" + "Error column: " + selectListItemExpr.toSql()); } if (selectListItem.getExpr() instanceof SlotRef) { if (meetAggregate) { throw new AnalysisException("The aggregate column should be after the single column"); } SlotRef slotRef = (SlotRef) selectListItem.getExpr(); String columnName = slotRef.getColumnName().toLowerCase(); if (!mvColumnNameSet.add(columnName)) { ErrorReport.reportAnalysisException(ErrorCode.ERR_DUP_FIELDNAME, columnName); } MVColumnItem mvColumnItem = new MVColumnItem(columnName); mvColumnItem.setType(slotRef.getType()); mvColumnItemList.add(mvColumnItem); } else if (selectListItem.getExpr() instanceof FunctionCallExpr) { FunctionCallExpr functionCallExpr = (FunctionCallExpr) selectListItem.getExpr(); String functionName = functionCallExpr.getFnName().getFunction(); Expr defineExpr = null; if (!functionName.equalsIgnoreCase("sum") && !functionName.equalsIgnoreCase("min") && !functionName.equalsIgnoreCase("max")) { throw new AnalysisException("The materialized view only support the sum, min and max aggregate " + "function. Error function: " + functionCallExpr.toSqlImpl()); } Preconditions.checkState(functionCallExpr.getChildren().size() == 1); Expr functionChild0 = functionCallExpr.getChild(0); SlotRef slotRef; if (functionChild0 instanceof SlotRef) { slotRef = (SlotRef) functionChild0; } else if (functionChild0 instanceof CastExpr && (functionChild0.getChild(0) instanceof SlotRef)) { slotRef = (SlotRef) functionChild0.getChild(0); } else { throw new AnalysisException("The children of aggregate function only support one original column. " + "Error function: " + functionCallExpr.toSqlImpl()); } meetAggregate = true; String columnName = slotRef.getColumnName().toLowerCase(); if (!mvColumnNameSet.add(columnName)) { ErrorReport.reportAnalysisException(ErrorCode.ERR_DUP_FIELDNAME, columnName); } if (beginIndexOfAggregation == -1) { beginIndexOfAggregation = i; } MVColumnItem mvColumnItem = new MVColumnItem(columnName); mvColumnItem.setAggregationType(AggregateType.valueOf(functionName.toUpperCase()), false); mvColumnItem.setDefineExpr(defineExpr); mvColumnItemList.add(mvColumnItem); } } if (beginIndexOfAggregation == 0) { throw new AnalysisException("The materialized view must contain at least one key column"); } } private void analyzeFromClause() throws AnalysisException { List<TableRef> tableRefList = selectStmt.getTableRefs(); if (tableRefList.size() != 1) { throw new AnalysisException("The materialized view only support one table in from clause."); } TableName tableName = tableRefList.get(0).getName(); baseIndexName = tableName.getTbl(); dbName = tableName.getDb(); } /* This function is used to supply order by columns and calculate short key count */ private void supplyOrderColumn() throws AnalysisException { /** * The keys type of Materialized view is aggregation. * All of group by columns are keys of materialized view. */ if (mvKeysType == KeysType.AGG_KEYS) { for (MVColumnItem mvColumnItem : mvColumnItemList) { if (mvColumnItem.getAggregationType() != null) { break; } mvColumnItem.setIsKey(true); } } else if (mvKeysType == KeysType.DUP_KEYS) { /** * There is no aggregation function in materialized view. * Supplement key of MV columns * The key is same as the short key in duplicate table * For example: select k1, k2 ... kn from t1 * The default key columns are first 36 bytes of the columns in define order. * If the number of columns in the first 36 is more than 3, the first 3 columns will be used. * column: k1, k2, k3. The key is true. 
* Supplement non-key of MV columns * column: k4... kn. The key is false, aggregation type is none, isAggregationTypeImplicit is true. */ int theBeginIndexOfValue = 0; int keySizeByte = 0; for (; theBeginIndexOfValue < mvColumnItemList.size(); theBeginIndexOfValue++) { MVColumnItem column = mvColumnItemList.get(theBeginIndexOfValue); keySizeByte += column.getType().getIndexSize(); if (theBeginIndexOfValue + 1 > FeConstants.shortkey_max_column_count || keySizeByte > FeConstants.shortkey_maxsize_bytes) { if (theBeginIndexOfValue == 0 && column.getType().getPrimitiveType().isCharFamily()) { column.setIsKey(true); theBeginIndexOfValue++; } break; } if (column.getType().isFloatingPointType()) { break; } if (column.getType().getPrimitiveType() == PrimitiveType.VARCHAR) { column.setIsKey(true); theBeginIndexOfValue++; break; } column.setIsKey(true); } if (theBeginIndexOfValue == 0) { throw new AnalysisException("The first column could not be float or double type, use decimal instead"); } for (; theBeginIndexOfValue < mvColumnItemList.size(); theBeginIndexOfValue++) { MVColumnItem mvColumnItem = mvColumnItemList.get(theBeginIndexOfValue); mvColumnItem.setAggregationType(AggregateType.NONE, true); } } } @Override public String toSql() { return null; } }
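The key-supplement rule in `supplyOrderColumn` for `DUP_KEYS` can be summarized as: take leading columns as sort keys until either 3 columns or 36 key bytes are reached, stop at the first float/double, and let a VARCHAR be the last key column. A simplified standalone sketch follows (omitting the CHAR-family special case, with `Col` as a hypothetical stand-in for `MVColumnItem`):

```java
import java.util.List;

final class ShortKeySketch {
    /** Hypothetical minimal column descriptor. */
    record Col(int indexSizeBytes, boolean isFloatingPoint, boolean isVarchar) {}

    /** Returns how many leading columns become sort keys; the rest become NONE-aggregated values. */
    static int chooseKeyPrefix(List<Col> cols) {
        final int maxKeyColumns = 3;  // FeConstants.shortkey_max_column_count
        final int maxKeyBytes = 36;   // FeConstants.shortkey_maxsize_bytes
        int keyCount = 0;
        int keyBytes = 0;
        for (Col c : cols) {
            keyBytes += c.indexSizeBytes();
            if (keyCount + 1 > maxKeyColumns || keyBytes > maxKeyBytes) {
                break;                // size limits reached
            }
            if (c.isFloatingPoint()) {
                break;                // float/double can never be a sort key
            }
            keyCount++;
            if (c.isVarchar()) {
                break;                // a VARCHAR closes the key prefix
            }
        }
        if (keyCount == 0) {
            throw new IllegalStateException(
                    "The first column could not be float or double type, use decimal instead");
        }
        return keyCount;
    }
}
```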
Exactly. So if prefetch is enabled and the drain loop couldn't emit the message downstream, we break out of the while-loop at line 216; that way, the message is not counted as consumed, so the increment won't happen.
private void drainQueue() { if (isTerminated()) { return; } long numberRequested = REQUESTED.get(this); boolean isEmpty = bufferMessages.isEmpty(); SynchronousReceiveWork currentDownstream = null; while (numberRequested != 0L && !isEmpty) { if (isTerminated()) { break; } long numberConsumed = 0L; while (numberRequested != numberConsumed) { if (isEmpty || isTerminated()) { break; } final ServiceBusReceivedMessage message = bufferMessages.poll(); boolean isEmitted = false; while (!isEmitted) { currentDownstream = getOrUpdateCurrentWork(); if (currentDownstream == null) { break; } isEmitted = currentDownstream.emitNext(message); } if (!isEmitted) { if (isPrefetchDisabled) { asyncClient.release(message).subscribe(__ -> { }, error -> logger.warning("lockToken[{}] Couldn't release the message.", message.getLockToken(), error), () -> logger.verbose("lockToken[{}] Message successfully released.", message.getLockToken())); } else { bufferMessages.addFirst(message); break; } } numberConsumed++; isEmpty = bufferMessages.isEmpty(); } final long requestedMessages = REQUESTED.get(this); if (requestedMessages != Long.MAX_VALUE) { numberRequested = REQUESTED.addAndGet(this, -numberConsumed); } } }
numberConsumed++;
private void drainQueue() { if (isTerminated()) { return; } long numberRequested = REQUESTED.get(this); boolean isEmpty = bufferMessages.isEmpty(); SynchronousReceiveWork currentDownstream = null; while (numberRequested != 0L && !isEmpty) { if (isTerminated()) { break; } long numberConsumed = 0L; while (numberRequested != numberConsumed) { if (isEmpty || isTerminated()) { break; } final ServiceBusReceivedMessage message = bufferMessages.poll(); boolean isEmitted = false; while (!isEmitted) { currentDownstream = getOrUpdateCurrentWork(); if (currentDownstream == null) { break; } isEmitted = currentDownstream.emitNext(message); } if (!isEmitted) { if (isPrefetchDisabled) { asyncClient.release(message).subscribe(__ -> { }, error -> logger.warning("lockToken[{}] Couldn't release the message.", message.getLockToken(), error), () -> logger.verbose("lockToken[{}] Message successfully released.", message.getLockToken())); } else { bufferMessages.addFirst(message); break; } } numberConsumed++; isEmpty = bufferMessages.isEmpty(); } final long requestedMessages = REQUESTED.get(this); if (requestedMessages != Long.MAX_VALUE) { numberRequested = REQUESTED.addAndGet(this, -numberConsumed); } } }
class SynchronousMessageSubscriber extends BaseSubscriber<ServiceBusReceivedMessage> { private final ClientLogger logger = new ClientLogger(SynchronousMessageSubscriber.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger wip = new AtomicInteger(); private final ConcurrentLinkedQueue<SynchronousReceiveWork> workQueue = new ConcurrentLinkedQueue<>(); private final ConcurrentLinkedDeque<ServiceBusReceivedMessage> bufferMessages = new ConcurrentLinkedDeque<>(); private final Object currentWorkLock = new Object(); private final ServiceBusReceiverAsyncClient asyncClient; private final boolean isPrefetchDisabled; private final Duration operationTimeout; private volatile SynchronousReceiveWork currentWork; /** * The number of requested messages. */ private volatile long requested; private static final AtomicLongFieldUpdater<SynchronousMessageSubscriber> REQUESTED = AtomicLongFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, "requested"); private volatile Subscription upstream; private static final AtomicReferenceFieldUpdater<SynchronousMessageSubscriber, Subscription> UPSTREAM = AtomicReferenceFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, Subscription.class, "upstream"); /** * Creates a synchronous subscriber with some initial work to queue. * * * @param asyncClient Client to update disposition of messages. * @param isPrefetchDisabled Indicates if the prefetch is disabled. * @param operationTimeout Timeout to wait for operation to complete. * @param initialWork Initial work to queue. * * <p> * When {@code isPrefetchDisabled} is true, we release the messages those received during the timespan * between the last terminated downstream and the next active downstream. * </p> * * @throws NullPointerException if {@code initialWork} is null. * @throws IllegalArgumentException if {@code initialWork.getNumberOfEvents()} is less than 1. */ SynchronousMessageSubscriber(ServiceBusReceiverAsyncClient asyncClient, SynchronousReceiveWork initialWork, boolean isPrefetchDisabled, Duration operationTimeout) { this.asyncClient = Objects.requireNonNull(asyncClient, "'asyncClient' cannot be null."); this.operationTimeout = Objects.requireNonNull(operationTimeout, "'operationTimeout' cannot be null."); this.workQueue.add(Objects.requireNonNull(initialWork, "'initialWork' cannot be null.")); this.isPrefetchDisabled = isPrefetchDisabled; if (initialWork.getNumberOfEvents() < 1) { throw logger.logExceptionAsError(new IllegalArgumentException( "'numberOfEvents' cannot be less than 1. Actual: " + initialWork.getNumberOfEvents())); } Operators.addCap(REQUESTED, this, initialWork.getNumberOfEvents()); } /** * On an initial subscription, will take the first work item, and request that amount of work for it. * * @param subscription Subscription for upstream. */ @Override protected void hookOnSubscribe(Subscription subscription) { if (!Operators.setOnce(UPSTREAM, this, subscription)) { logger.warning("This should only be subscribed to once. Ignoring subscription."); return; } getOrUpdateCurrentWork(); subscription.request(REQUESTED.get(this)); } /** * Publishes the event to the current {@link SynchronousReceiveWork}. If that work item is complete, will dispose of * the subscriber. * * @param message Event to publish. 
*/ @Override protected void hookOnNext(ServiceBusReceivedMessage message) { if (isTerminated()) { Operators.onNextDropped(message, Context.empty()); } else { bufferMessages.add(message); drain(); } } /** * Queue the work to be picked up by drain loop. * * @param work to be queued. */ void queueWork(SynchronousReceiveWork work) { Objects.requireNonNull(work, "'work' cannot be null"); workQueue.add(work); if (workQueue.peek() == work) { logger.verbose("workId[{}] numberOfEvents[{}] timeout[{}] First work in queue. Requesting upstream if " + "needed.", work.getId(), work.getNumberOfEvents(), work.getTimeout()); getOrUpdateCurrentWork(); } else { logger.verbose("workId[{}] numberOfEvents[{}] timeout[{}] Queuing receive work.", work.getId(), work.getNumberOfEvents(), work.getTimeout()); } if (UPSTREAM.get(this) != null) { drain(); } } /** * Drain the work, only one thread can be in this loop at a time. */ private void drain() { if (wip.getAndIncrement() != 0) { return; } int missed = 1; while (missed != 0) { try { drainQueue(); } finally { missed = wip.addAndGet(-missed); } } } /*** * Drain the queue using a lock on current work in progress. */ /** * {@inheritDoc} */ @Override protected void hookOnError(Throwable throwable) { dispose("Errors occurred upstream", throwable); } @Override protected void hookOnCancel() { this.dispose(); } private boolean isTerminated() { if (UPSTREAM.get(this) == Operators.cancelledSubscription()) { return true; } return isDisposed.get(); } /** * Gets the current work item if it is not terminal and cleans up any existing timeout operations. * * @return Gets or sets the next work item. Null if there are no work items currently. */ private SynchronousReceiveWork getOrUpdateCurrentWork() { synchronized (currentWorkLock) { if (currentWork != null && !currentWork.isTerminal()) { return currentWork; } currentWork = workQueue.poll(); while (currentWork != null) { if (currentWork.isTerminal()) { REQUESTED.updateAndGet(this, currentRequest -> { final int remainingEvents = currentWork.getRemainingEvents(); if (remainingEvents < 1) { return currentRequest; } final long difference = currentRequest - remainingEvents; logger.verbose("Updating REQUESTED because current work item is terminal. currentRequested[{}]" + " currentWork.remainingEvents[{}] difference[{}]", currentRequest, remainingEvents, difference); return difference < 0 ? 0 : difference; }); currentWork = workQueue.poll(); continue; } final SynchronousReceiveWork work = currentWork; logger.verbose("workId[{}] numberOfEvents[{}] Current work updated.", work.getId(), work.getNumberOfEvents()); work.start(); requestUpstream(work.getNumberOfEvents()); return work; } return currentWork; } } /** * Adds additional credits upstream if {@code numberOfMessages} is greater than the number of {@code REQUESTED} * items. * * @param numberOfMessages Number of messages required downstream. */ private void requestUpstream(long numberOfMessages) { if (isTerminated()) { logger.info("Cannot request more messages upstream. Subscriber is terminated."); return; } final Subscription subscription = UPSTREAM.get(this); if (subscription == null) { logger.info("There is no upstream to request messages from."); return; } final long currentRequested = REQUESTED.get(this); final long difference = numberOfMessages - currentRequested; logger.verbose("Requesting messages from upstream. 
currentRequested[{}] numberOfMessages[{}] difference[{}]", currentRequested, numberOfMessages, difference); if (difference <= 0) { return; } Operators.addCap(REQUESTED, this, difference); subscription.request(difference); } @Override public void dispose() { super.dispose(); dispose("Upstream completed the receive work.", null); } private void dispose(String message, Throwable throwable) { super.dispose(); if (isDisposed.getAndSet(true)) { return; } synchronized (currentWorkLock) { if (currentWork != null) { currentWork.complete(message, throwable); currentWork = null; } SynchronousReceiveWork w = workQueue.poll(); while (w != null) { w.complete(message, throwable); w = workQueue.poll(); } } } /** * package-private method to check queue size. * * @return The current number of items in the queue. */ int getWorkQueueSize() { return this.workQueue.size(); } }
class SynchronousMessageSubscriber extends BaseSubscriber<ServiceBusReceivedMessage> { private final ClientLogger logger = new ClientLogger(SynchronousMessageSubscriber.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger wip = new AtomicInteger(); private final ConcurrentLinkedQueue<SynchronousReceiveWork> workQueue = new ConcurrentLinkedQueue<>(); private final ConcurrentLinkedDeque<ServiceBusReceivedMessage> bufferMessages = new ConcurrentLinkedDeque<>(); private final Object currentWorkLock = new Object(); private final ServiceBusReceiverAsyncClient asyncClient; private final boolean isPrefetchDisabled; private final Duration operationTimeout; private volatile SynchronousReceiveWork currentWork; /** * The number of requested messages. */ private volatile long requested; private static final AtomicLongFieldUpdater<SynchronousMessageSubscriber> REQUESTED = AtomicLongFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, "requested"); private volatile Subscription upstream; private static final AtomicReferenceFieldUpdater<SynchronousMessageSubscriber, Subscription> UPSTREAM = AtomicReferenceFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, Subscription.class, "upstream"); /** * Creates a synchronous subscriber with some initial work to queue. * * * @param asyncClient Client to update disposition of messages. * @param isPrefetchDisabled Indicates if the prefetch is disabled. * @param operationTimeout Timeout to wait for operation to complete. * @param initialWork Initial work to queue. * * <p> * When {@code isPrefetchDisabled} is true, we release the messages those received during the timespan * between the last terminated downstream and the next active downstream. * </p> * * @throws NullPointerException if {@code initialWork} is null. * @throws IllegalArgumentException if {@code initialWork.getNumberOfEvents()} is less than 1. */ SynchronousMessageSubscriber(ServiceBusReceiverAsyncClient asyncClient, SynchronousReceiveWork initialWork, boolean isPrefetchDisabled, Duration operationTimeout) { this.asyncClient = Objects.requireNonNull(asyncClient, "'asyncClient' cannot be null."); this.operationTimeout = Objects.requireNonNull(operationTimeout, "'operationTimeout' cannot be null."); this.workQueue.add(Objects.requireNonNull(initialWork, "'initialWork' cannot be null.")); this.isPrefetchDisabled = isPrefetchDisabled; if (initialWork.getNumberOfEvents() < 1) { throw logger.logExceptionAsError(new IllegalArgumentException( "'numberOfEvents' cannot be less than 1. Actual: " + initialWork.getNumberOfEvents())); } Operators.addCap(REQUESTED, this, initialWork.getNumberOfEvents()); } /** * On an initial subscription, will take the first work item, and request that amount of work for it. * * @param subscription Subscription for upstream. */ @Override protected void hookOnSubscribe(Subscription subscription) { if (!Operators.setOnce(UPSTREAM, this, subscription)) { logger.warning("This should only be subscribed to once. Ignoring subscription."); return; } getOrUpdateCurrentWork(); subscription.request(REQUESTED.get(this)); } /** * Publishes the event to the current {@link SynchronousReceiveWork}. If that work item is complete, will dispose of * the subscriber. * * @param message Event to publish. 
*/ @Override protected void hookOnNext(ServiceBusReceivedMessage message) { if (isTerminated()) { Operators.onNextDropped(message, Context.empty()); } else { bufferMessages.add(message); drain(); } } /** * Queue the work to be picked up by drain loop. * * @param work to be queued. */ void queueWork(SynchronousReceiveWork work) { Objects.requireNonNull(work, "'work' cannot be null"); workQueue.add(work); if (workQueue.peek() == work) { logger.verbose("workId[{}] numberOfEvents[{}] timeout[{}] First work in queue. Requesting upstream if " + "needed.", work.getId(), work.getNumberOfEvents(), work.getTimeout()); getOrUpdateCurrentWork(); } else { logger.verbose("workId[{}] numberOfEvents[{}] timeout[{}] Queuing receive work.", work.getId(), work.getNumberOfEvents(), work.getTimeout()); } if (UPSTREAM.get(this) != null) { drain(); } } /** * Drain the work, only one thread can be in this loop at a time. */ private void drain() { if (wip.getAndIncrement() != 0) { return; } int missed = 1; while (missed != 0) { try { drainQueue(); } finally { missed = wip.addAndGet(-missed); } } } /*** * Drain the queue using a lock on current work in progress. */ /** * {@inheritDoc} */ @Override protected void hookOnError(Throwable throwable) { dispose("Errors occurred upstream", throwable); } @Override protected void hookOnCancel() { this.dispose(); } private boolean isTerminated() { if (UPSTREAM.get(this) == Operators.cancelledSubscription()) { return true; } return isDisposed.get(); } /** * Gets the current work item if it is not terminal and cleans up any existing timeout operations. * * @return Gets or sets the next work item. Null if there are no work items currently. */ private SynchronousReceiveWork getOrUpdateCurrentWork() { synchronized (currentWorkLock) { if (currentWork != null && !currentWork.isTerminal()) { return currentWork; } currentWork = workQueue.poll(); while (currentWork != null) { if (currentWork.isTerminal()) { REQUESTED.updateAndGet(this, currentRequest -> { final int remainingEvents = currentWork.getRemainingEvents(); if (remainingEvents < 1) { return currentRequest; } final long difference = currentRequest - remainingEvents; logger.verbose("Updating REQUESTED because current work item is terminal. currentRequested[{}]" + " currentWork.remainingEvents[{}] difference[{}]", currentRequest, remainingEvents, difference); return difference < 0 ? 0 : difference; }); currentWork = workQueue.poll(); continue; } final SynchronousReceiveWork work = currentWork; logger.verbose("workId[{}] numberOfEvents[{}] Current work updated.", work.getId(), work.getNumberOfEvents()); work.start(); requestUpstream(work.getNumberOfEvents()); return work; } return currentWork; } } /** * Adds additional credits upstream if {@code numberOfMessages} is greater than the number of {@code REQUESTED} * items. * * @param numberOfMessages Number of messages required downstream. */ private void requestUpstream(long numberOfMessages) { if (isTerminated()) { logger.info("Cannot request more messages upstream. Subscriber is terminated."); return; } final Subscription subscription = UPSTREAM.get(this); if (subscription == null) { logger.info("There is no upstream to request messages from."); return; } final long currentRequested = REQUESTED.get(this); final long difference = numberOfMessages - currentRequested; logger.verbose("Requesting messages from upstream. 
currentRequested[{}] numberOfMessages[{}] difference[{}]", currentRequested, numberOfMessages, difference); if (difference <= 0) { return; } Operators.addCap(REQUESTED, this, difference); subscription.request(difference); } @Override public void dispose() { super.dispose(); dispose("Upstream completed the receive work.", null); } private void dispose(String message, Throwable throwable) { super.dispose(); if (isDisposed.getAndSet(true)) { return; } synchronized (currentWorkLock) { if (currentWork != null) { currentWork.complete(message, throwable); currentWork = null; } SynchronousReceiveWork w = workQueue.poll(); while (w != null) { w.complete(message, throwable); w = workQueue.poll(); } } } /** * package-private method to check queue size. * * @return The current number of items in the queue. */ int getWorkQueueSize() { return this.workQueue.size(); } }
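To make the consumed-count semantics in `drainQueue` explicit, here is the relevant branch again, annotated rather than changed; the comments restate the behavior discussed in the comment above.

```java
if (!isEmitted) {
    if (isPrefetchDisabled) {
        // No active downstream and no prefetch buffer to park the message in:
        // release it back to the service so another receiver can take it.
        // Control falls through, so numberConsumed++ still runs below and the
        // message is later subtracted from REQUESTED.
        asyncClient.release(message).subscribe(/* logging subscribers omitted */);
    } else {
        // Prefetch is enabled: return the message to the head of the local
        // buffer and break out of the inner while-loop. numberConsumed is NOT
        // incremented, so the message stays counted as outstanding and will be
        // drained once a downstream work item becomes available.
        bufferMessages.addFirst(message);
        break;
    }
}
numberConsumed++; // reached only when the message was emitted or released
```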