// Folds one incoming update into this record's accumulated state.
// Returns FINAL when no further updates can affect the record,
// SUCCESS when more updates may still be merged in.
public RecordDataUpdateResult applyUpdate(RecordUpdate update) {
    // A record that is already fully populated (or tombstoned) is final;
    // later updates are ignored rather than treated as fatal.
    if (state == RecordDataState.FULL || state == RecordDataState.DELETED) {
        // throw new Exception("applyUpdate() called on fully populated record!");
        Debug.WriteLine("warn: applyUpdate() called on fully populated record. ignoring.");
        return RecordDataUpdateResult.FINAL;
    }

    if (update.type == RecordUpdateTypes.DELETION_TOMBSTONE) {
        this.state = RecordDataState.DELETED;
        return RecordDataUpdateResult.FINAL;
    }
    if (update.type == RecordUpdateTypes.FULL) {
        this.state = RecordDataState.FULL;
        this.data = update.data;
        return RecordDataUpdateResult.FINAL;
    }
    if (update.type == RecordUpdateTypes.NONE) {
        // No-op update; the record can still accept more data.
        return RecordDataUpdateResult.SUCCESS;
    }
    if (update.type == RecordUpdateTypes.PARTIAL) {
        throw new Exception("partial update not implemented");
    }
    throw new Exception("unknown update type");
}
// Writes a value into this snapshot, tagging the key with the current
// snapshot number. Throws if the snapshot has been frozen.
public void setValue(RecordKey key, RecordUpdate update) {
    if (this.is_frozen) {
        throw new Exception("snapshot not writable! " + this.frozen_at_snapshotnumber);
    }

    // Suffix the key with our snapshot number so reads can select by snapshot.
    key.appendKeyPart(new RecordKeyType_AttributeTimestamp(this.current_snapshot));

    // Wrap the update payload in a sub-update, mostly because tombstones
    // need to reach the next stage as "real" records.
    RecordUpdate wrapped = RecordUpdate.WithPayload(update.encode());
    next_stage.setValue(key, wrapped);
}
// Persists one replication-log entry under _logs/<server_guid>/<logstamp>
// and wakes any pushers waiting for new log data.
internal void _recordLogEntry(string from_server_guid, long logstamp, RecordUpdate logdata) {
    var logkey = new RecordKey();
    logkey.appendKeyPart("_logs");
    logkey.appendKeyPart(from_server_guid);
    logkey.appendKeyPart(new RecordKeyType_Long(logstamp));

    next_stage.setValue(logkey, logdata);
    pusher.wakeUpLogSleepers();
}
// Applies a log entry received from another server: records it in our copy
// of that server's log, then replays its key/value pairs into local _data.
internal void applyLogEntry(string from_server_guid, long logstamp, RecordUpdate logdata) {
    // (0) unpack the packed log payload
    ISegmentBlockDecoder decoder =
        new SegmentBlockBasicDecoder(new BlockAccessor(logdata.data));

    // (1) add it to our copy of that server's log
    this._recordLogEntry(from_server_guid, logstamp, logdata);

    // (2) replay each decoded record into the local _data keyspace
    foreach (var kvp in decoder.sortedWalk()) {
        var local_data_key = new RecordKey().appendKeyPart("_data");
        foreach (var part in kvp.Key.key_parts) {
            local_data_key.appendKeyPart(part);
        }
        next_stage.setValue(local_data_key, kvp.Value);
    }
}
// Writes a key/value update: first appends it to our replication log so
// peers can replay it, then applies it to the local _data keyspace.
// Fixes: removed the unused `DateTime now` local, corrected the duplicated
// "(2)" step numbering, and scoped the MemoryStream with `using`.
public void setValue(RecordKey skey, RecordUpdate supdate) {
    checkActive();

    // (1) write our repl log entry
    long logstamp = id_gen.nextTimestamp();
    RecordKey logkey = new RecordKey()
        .appendKeyPart("_logs")
        .appendKeyPart(ctx.server_guid)
        .appendKeyPart(new RecordKeyType_Long(logstamp));

    // (1.1) pack the key/value together into the log entry
    byte[] packed_update;
    using (MemoryStream writer = new MemoryStream()) {
        // TODO: this seems like a really inefficient way to write out a key
        ISegmentBlockEncoder encoder = new SegmentBlockBasicEncoder();
        encoder.setStream(writer);
        encoder.add(skey, supdate);
        encoder.flush();
        packed_update = writer.ToArray();
    }
    RecordUpdate logupdate = RecordUpdate.WithPayload(packed_update);

    Console.WriteLine("writing log entry: {0} -> [ {1} = {2} ]", logkey, skey, supdate);
    next_stage.setValue(logkey, logupdate);

    // (2) trigger the repl notifier that there is a new entry to push
    pusher.wakeUpLogSleepers();

    // (3) write the record data under the private _data prefix
    Console.WriteLine("writing data entry: {0} = {1}", skey, supdate);
    RecordKey private_record_key = new RecordKey().appendKeyPart("_data");
    foreach (var part in skey.key_parts) {
        private_record_key.appendKeyPart(part);
    }
    next_stage.setValue(private_record_key, supdate);
}
// Prefixes the key with this partition's subset name, then delegates the
// write to the next stage.
public void setValue(RecordKey key, RecordUpdate update) {
    var prefixed_key = new RecordKey();
    prefixed_key.appendKeyPart(this.subset_name);
    prefixed_key.appendKeyPart(key);
    next_stage.setValue(prefixed_key, update);
}
// Stamps the key with a fresh timestamp attribute and forwards the write.
// Throws if this snapshot view has been frozen.
// NOTE(review): this mutates the caller's key (the clone below is disabled)
// — confirm callers do not reuse the key object afterwards.
public void setValue(RecordKey key, RecordUpdate update) {
    // RecordKey key = key.clone();
    if (this.is_frozen) {
        throw new Exception("snapshot not writable! " + this.frozen_at_timestamp);
    }

    // (1) get our timestamp
    long ts = id_gen.nextTimestamp();

    // (2) suffix the key with the timestamp attribute
    key.appendKeyPart(new RecordKeyType_AttributeTimestamp(ts));
    next_stage.setValue(key, update);
}
// Appends one encoded key/value record to the output stream and remembers
// its start offset and lengths in record_offsets for later indexing.
public void add(RecordKey key, RecordUpdate data) {
    byte[] encoded_key = key.encode();
    byte[] encoded_data = data.encode();

    // Capture the start position BEFORE writing the record bytes.
    RecordInfo info;
    info.record_start_pos = _curPos();
    info.key_len = encoded_key.Length;
    info.data_len = encoded_data.Length;

    output.Write(encoded_key, 0, encoded_key.Length);
    output.Write(encoded_data, 0, encoded_data.Length);
    record_offsets.Add(info);
}
// Forwards the key/value pair to the wrapped sub-encoder unchanged.
public void add(RecordKey key, RecordUpdate data) {
    subenc.add(key, data);
}
// Builds a Feature from an ISO 8211 data record, reading the FRID/FOID
// identification fields, the ATTF/NATF attribute fields, and the
// feature->feature (FFPC/FFPT) and feature->spatial (FSPC/FSPT) pointer
// fields when each is present in the record.
// Fix: removed the unused `v001` local (the "0001" field was fetched and
// never used) and deleted dead commented-out code.
public Feature(DataRecord record, BaseFile baseFile, Cell cell) {
    _dataRecord = record;
    this.baseFile = baseFile;
    this.cell = cell;

    // FRID : Feature Record Identifier
    var frid = record.Fields.GetFieldByTag("FRID");
    if (frid != null) {
        // NOTE(review): RCNM feeds RecordIdentificationNumber and RCID feeds
        // RecordName; in S-57 RCNM is the record *name* and RCID the record
        // *identification number*, so these look swapped — confirm against
        // consumers before changing.
        RecordIdentificationNumber = frid.GetUInt32("RCNM");
        RecordName = frid.GetUInt32("RCID");
        Primitive = (GeometricPrimitive)frid.GetUInt32("PRIM");
        Group = frid.GetUInt32("GRUP");
        Code = frid.GetUInt32("OBJL");
        RecordVersion = frid.GetUInt32("RVER");
        RecordUpdateInstruction = (RecordUpdate)frid.GetUInt32("RUIN");
    }

    // FOID : Feature Object Identifier
    var foid = record.Fields.GetFieldByTag("FOID");
    if (foid != null) {
        var agen = foid.GetUInt32("AGEN");
        var fidn = foid.GetUInt32("FIDN");
        var fids = foid.GetUInt32("FIDS");
        lnam = new LongName(agen, fidn, fids);
    }

    // ATTF : Attributes
    var attr = record.Fields.GetFieldByTag("ATTF");
    if (attr != null) {
        Attributes = GetAttributes(attr, baseFile);
    }

    // NATF : National attributes, merged into the regular attribute set.
    var natf = record.Fields.GetFieldByTag("NATF");
    if (natf != null) {
        var natfAttr = GetAttributes(natf, baseFile);
        if (Attributes != null) {
            // NOTE(review): Dictionary.Add throws on a duplicate key — this
            // assumes ATTF and NATF never share an attribute code. Confirm.
            foreach (var entry in natfAttr) {
                Attributes.Add(entry.Key, entry.Value);
            }
        } else {
            Attributes = natfAttr;
        }
    }

    // FFPC : Feature Record To Feature Object Pointer Control
    var ffpc = record.Fields.GetFieldByTag("FFPC");
    if (ffpc != null) {
        FeatureObjectPointerUpdateInstruction = (RecordUpdate)ffpc.GetUInt32("FFUI");
        FeatureObjectPointerIndex = ffpc.GetUInt32("FFIX");
        NumberOfFeatureObjectPointers = ffpc.GetUInt32("NFPT");
    }

    // <R> FFPT : Feature Record To Feature Object Pointer
    var ffpt = record.Fields.GetFieldByTag("FFPT");
    if (ffpt != null) {
        FeaturePtrs = GetFFPTs(ffpt);
    }

    // FSPC : Feature Record to Spatial Record Pointer Control
    var fspc = record.Fields.GetFieldByTag("FSPC");
    if (fspc != null) {
        FeatureToSpatialRecordPointerUpdateInstruction = (RecordUpdate)fspc.GetUInt32("FSUI");
        FeatureToSpatialRecordPointerIndex = fspc.GetUInt32("FSIX");
        NumberOfFeatureToSpatialRecordPointers = fspc.GetUInt32("NSPT");
    }

    // FSPT : Feature Record to Spatial Record Pointer
    var fspt = record.Fields.GetFieldByTag("FSPT");
    if (fspt != null) {
        VectorPtrs = GetFSPTs(fspt);
    }
}
// Reads config.json, then for each configured domain/record asks Dnspod to
// update the DNS record to this machine's current IP. Counts and reports
// successful updates, then waits 3 seconds before returning so the console
// output stays visible.
// Fixes: ContainsKey+indexer double lookups replaced with TryGetValue;
// the wasted GetIP(r.index) call when r.ip == IPType.internet removed;
// non-success API responses are now logged instead of silently dropped;
// repeated cfg.domains[i] hoisted into a local.
private static void SetDns() {
    ILog _log = LogManager.GetLogger(typeof(Program));
    LogManager.Configure("logs\\log.txt", 1024, false);
    int success = 0;
    try {
        Config cfg = Config.Load("config.json");
        DnspodClient client = new DnspodClient(cfg.email, cfg.password);
        DomainList list = client.GetDomains();
        Dictionary<string, SimpleDDNS.Clients.Dnspod.Domain> domainDict = list.GetDomainDict();
        if (cfg.domains != null) {
            for (int i = 0; i < cfg.domains.Count; i++) {
                var domainCfg = cfg.domains[i];
                if (domainCfg.records == null || domainCfg.records.Count == 0) {
                    continue;
                }
                SimpleDDNS.Clients.Dnspod.Domain d;
                if (!domainDict.TryGetValue(domainCfg.name, out d)) {
                    continue; // configured domain not present in the account
                }
                RecordList rlist = client.GetRecords(d.id);
                if (rlist == null || rlist.records == null) {
                    continue;
                }
                Dictionary<string, SimpleDDNS.Clients.Dnspod.Record> records = rlist.GetRecordDict();
                for (int j = 0; j < domainCfg.records.Count; j++) {
                    Record r = domainCfg.records[j];
                    SimpleDDNS.Clients.Dnspod.Record existing;
                    if (!records.TryGetValue(r.name, out existing)) {
                        continue; // configured record not present on the domain
                    }
                    // Resolve the IP once: -1 selects the public (internet)
                    // address, otherwise use the configured interface index.
                    string value = (r.ip == IPType.internet) ? GetIP(-1) : GetIP(r.index);
                    try {
                        RecordUpdate ru = client.UpdateRecord(d.id, existing.id, r.name, value);
                        if (ru != null && ru.status != null && ru.status.code == "1") {
                            string msg = string.Format("成功解析{0}.{1} -> {2}", r.name, d.name, value);
                            Console.WriteLine(msg);
                            _log.Info(msg);
                            success++;
                        } else {
                            // Previously dropped silently; log so failures are visible.
                            _log.Error(string.Format("update failed for {0}.{1}", r.name, d.name));
                        }
                    } catch (Exception ex) {
                        _log.Error(ex);
                    }
                }
            }
        }
    } catch (Exception ex) {
        _log.Error(ex);
    }
    Console.WriteLine("成功解析{0}个域名! 3秒钟后退出...", success);
    Thread.Sleep(3000);
}
// Appends one record as "<escaped key><KEY_VAL_SEP><escaped value><END_OF_LINE>"
// to the output stream.
public void add(RecordKey key, RecordUpdate data) {
    byte[] encoded_key = key.encode();
    byte[] encoded_value = data.encode();

    writeEncoded(output, encoded_key);
    output.WriteByte(KEY_VAL_SEP);
    writeEncoded(output, encoded_value);
    output.WriteByte(END_OF_LINE);
}
// Packs a single key/update pair with the basic block encoder and queues it
// as an UPDATE log command.
public void setValue(RecordKey key, RecordUpdate update) {
    // TODO: this seems like a really inefficient way to write out a key
    MemoryStream buffer = new MemoryStream();
    ISegmentBlockEncoder encoder = new SegmentBlockBasicEncoder();
    encoder.setStream(buffer);
    encoder.add(key, update);
    encoder.flush();
    buffer.Flush();

    this.addCommand((byte)LogCommands.UPDATE, buffer.ToArray());

    // Writes are actually applied to the workingSegment when the LogWriter
    // pushes them to the ILogReceiver. This assures, for example, that
    // DISK_ATOMIC writes do not apply to the segments until the writegroup
    // is flushed.
}