/// <summary>
/// NextTx() is called to start a new transaction. The out parameter "seqId" identifies the transaction
/// and is also used in Ack() and Fail(). In NextTx(), the user can emit data to the Java side.
/// The data is stored in ZooKeeper to support replay. Because the capacity of ZooKeeper is very limited,
/// the user should only emit metadata, not bulk data, in a transactional spout.
///
/// Just like their non-transactional counterparts, NextTx(), Ack(), and Fail() are all called in a tight loop in a single thread in the C# process.
/// When there is no data to emit, it is courteous to have NextTx() sleep for a short amount of time (10 milliseconds) so as not to waste too much CPU.
/// </summary>
/// <param name="seqId">Sequence id of the transaction</param>
/// <param name="parms"></param>
public void NextTx(out long seqId, Dictionary<string, Object> parms)
{
    Context.Logger.Info("NextTx enter");

    // Emit metadata for the transaction.
    for (int i = 0; i < 2; i++)
    {
        Person person = persons[rand.Next(0, persons.Length)];
        Context.Logger.Info("Emit: {0}", person.ToString());
        this.ctx.Emit(new Values(person));
    }

    // Create a state object; its ID serves as the transaction's sequence id.
    State state = stateStore.Create();
    seqId = state.ID;
    Context.Logger.Info("NextTx exit, seqId: {0}", seqId);
}
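The Ack() and Fail() methods referred to in the comment above complete the transactional contract. The bodies below are a minimal sketch, not the sample's actual implementation: they only log the sequence id, which is the simplest valid handling; any commit or rollback of source-side bookkeeping keyed by seqId would go where the comments indicate.

/// <summary>
/// Minimal sketch (not part of the original sample): Ack() is invoked when the
/// transaction identified by seqId has been processed successfully, Fail() when
/// it must be replayed.
/// </summary>
public void Ack(long seqId, Dictionary<string, Object> parms)
{
    Context.Logger.Info("Ack, seqId: {0}", seqId);
    // A production spout could commit or release any bookkeeping keyed by seqId here.
}

public void Fail(long seqId, Dictionary<string, Object> parms)
{
    Context.Logger.Info("Fail, seqId: {0}", seqId);
    // Replay is driven from the Java side using the metadata stored in ZooKeeper,
    // so the C# side often only logs or rolls back local bookkeeping here.
}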
/// <summary>
/// NextTx() is called to start a new transaction. The out parameter "seqId" identifies the transaction
/// and is also used in Ack() and Fail(). In NextTx(), the user can emit data to the Java side.
/// The data is stored in ZooKeeper to support replay. Because the capacity of ZooKeeper is very limited,
/// the user should only emit metadata, not bulk data, in a transactional spout.
///
/// Just like their non-transactional counterparts, NextTx(), Ack(), and Fail() are all called in a tight loop in a single thread in the C# process.
/// When there is no data to emit, it is courteous to have NextTx() sleep for a short amount of time (10 milliseconds) so as not to waste too much CPU.
/// </summary>
/// <param name="seqId">Sequence id of the transaction</param>
/// <param name="parms"></param>
public void NextTx(out long seqId, Dictionary<string, Object> parms)
{
    Context.Logger.Info("NextTx enter");

    // Emit the names of the data source files as the transaction metadata.
    for (int i = 0; i < 2; i++)
    {
        string filename = dataSourceFiles[rand.Next(0, 3)];
        this.ctx.Emit(new Values(filename));
        Context.Logger.Info("Emit: {0}", filename);
    }

    if (Context.pluginType != SCPPluginType.SCP_NET_LOCAL)
    {
        // In a real (non-local) topology, the state store assigns the sequence id.
        State state = stateStore.Create();
        seqId = state.ID;
    }
    else
    {
        // In local-test mode there is no state store, so use a simple local counter.
        lastSeqId++;
        seqId = lastSeqId;
    }

    Context.Logger.Info("NextTx exit, seqId: {0}", seqId);
}
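The comment above suggests sleeping for about 10 milliseconds when there is nothing to emit. One way to apply that advice inside NextTx() is sketched below; the pendingItems collection is a hypothetical data source (not part of the original sample), and returning -1 as the sequence id to indicate that no transaction was started mirrors the Kafka example that follows.

// Hypothetical back-off sketch: if the (assumed) source collection is empty,
// yield the CPU briefly instead of spinning in the tight NextTx/Ack/Fail loop.
if (pendingItems.Count == 0)
{
    System.Threading.Thread.Sleep(10);
    seqId = -1L;    // no transaction started in this call
    return;
}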
public void NextTx(out long seqId, Dictionary<string, Object> parms)
{
    Context.Logger.Info("NextTx enter");

    KafkaMeta meta = (KafkaMeta)parms[Constants.KAFKA_META];

    // The previous Kafka meta, retrieved from ZooKeeper.
    KafkaMeta preMeta = null;
    string regKey = "LastSend-" + meta.Topic;
    var reg = stateStore.Get<Registry>(regName);
    if (reg.ExistsKey(regKey))
    {
        preMeta = reg.GetKeyValue<KafkaMeta>(regKey);
    }
    else
    {
        reg.CreateKey(regKey);
    }

    bool isReady = false;
    if (preMeta != null)
    {
        foreach (int i in preMeta.PartitionOffsets.Keys)
        {
            long preEndOffset = preMeta.PartitionOffsets[i].EndOffset;

            // Set the current begin offset to the end offset of the previously stored Kafka meta.
            meta.PartitionOffsets[i].BeginOffset = preEndOffset;

            long endOffset = meta.PartitionOffsets[i].EndOffset;
            Context.Logger.Info(String.Format("For partition {0}, the begin offset {1}, the end offset {2}", i, preEndOffset, endOffset));

            // There is new data for this partition only if the offset has advanced.
            if (endOffset > preEndOffset)
            {
                isReady = true;
            }
        }
    }
    else
    {
        isReady = true;
        Context.Logger.Info("preMeta is null");
    }

    if (isReady)
    {
        State state = stateStore.Create();
        Context.Logger.Info("stateid in spout {0}", state.ID);
        meta.StateId = BitConverter.GetBytes(state.ID);

        // Emit the Kafka meta.
        this.ctx.Emit("mydefault", new Values(meta));

        // Save the Kafka meta to ZooKeeper.
        reg.SetKeyValue(regKey, meta);

        seqId = state.ID;
        Context.Logger.Info("NextTx exit, seqId: {0}", seqId);
    }
    else
    {
        // No new data; report -1 so that no transaction is started for this call.
        seqId = -1L;
    }

    System.Threading.Thread.Sleep(1000);
}
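The Emit() call above targets a named stream, "mydefault". For that to work, the spout has to declare the stream and its schema when it is constructed. The sketch below shows one plausible declaration; the class name KafkaSpout and the exact schema are assumptions, and the ComponentStreamSchema/DeclareComponentSchema calls follow the usual SCP.NET pattern rather than code taken from this sample.

public KafkaSpout(Context ctx)
{
    this.ctx = ctx;

    // Declare a single output stream named "mydefault" carrying KafkaMeta objects,
    // so that this.ctx.Emit("mydefault", ...) in NextTx() has a matching schema.
    Dictionary<string, List<Type>> outputSchema = new Dictionary<string, List<Type>>();
    outputSchema.Add("mydefault", new List<Type>() { typeof(KafkaMeta) });
    this.ctx.DeclareComponentSchema(new ComponentStreamSchema(null, outputSchema));
}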