/**
 * isPartOfAtomicCommand
 * Checks whether the given command was registered as a child of an atomic
 * (composite) command via register().
 * @param command the command to look up
 * @return true when the command is tracked in atomicHelpMap
 */
public bool isPartOfAtomicCommand(Command command) {
    try {
        reentrantLock.enter();
        // ContainsKey states the intent directly; the original TryGetValue +
        // null compare would also misreport membership if a null root were
        // ever stored under the key.
        return atomicHelpMap.ContainsKey(command);
    } finally {
        reentrantLock.exit();
    }
}
/**
 * register
 * Stores an atomic (composite) command: maps every child command back to its
 * root in atomicHelpMap, and keeps a defensive copy of the child list under
 * the root in atomicMap.
 * @param rootCommand the composite root command
 * @param list the child commands belonging to the root
 */
public void register(Command rootCommand, List<Command> list) {
    try {
        reentrantLock.enter();
        // copy so later mutation of the caller's list cannot affect our bookkeeping
        List<Command> children = new List<Command>(list);
        foreach (Command child in children) {
            atomicHelpMap.Add(child, rootCommand);
        }
        atomicMap.Add(rootCommand, children);
    } finally {
        reentrantLock.exit();
    }
}
/**
 * insertMessage
 * Records a versioned command: indexes it by version, groups it with all
 * commands received at the same UTC instant, and remembers that instant per
 * command for later reverts.
 * @param version the command's version number (unique key into map)
 * @param command the command to record
 */
public void insertMessage(int version, Command command) {
    lock (toLock) {
        map.Add(version, command);
        // DateTime.UtcNow is the direct (and cheaper) equivalent of
        // DateTime.Now.ToUniversalTime().
        DateTime now = DateTime.UtcNow;
        List<Command> commandList;
        // single lookup instead of TryGetValue followed by a null check
        if (!time.TryGetValue(now, out commandList)) {
            commandList = new List<Command>();
            time.Add(now, commandList);
        }
        commandList.Add(command);
        revertTime.Add(command, now);
    }
}
/**
 * consume
 * Discriminate the command handler.
 * Called by the core: dispatches the command to the matching handle* method,
 * then marks the touched entity dirty for every transaction on a different schema.
 * @param command
 * @throws PlanckDBException
 */
public void consume(Command command) {
    switch (command.getCommandType()) {
        case PlanckDBConstants.CREATE_NEW_ENTITY:
            handleNewItemCommand(command);
            break;
        case PlanckDBConstants.DELETE_ENTITY:
            handleRemoveItemCommand(command);
            break;
        case PlanckDBConstants.ADD_CHILD_ENTITY_TO_PARENT_ENTITY:
            handleAddChildToParent(command);
            break;
        case PlanckDBConstants.REMOVE_CHILD_ENTITY_FROM_PARENT_ENTITY:
            handleRemoveChildFromParent(command);
            break;
        case PlanckDBConstants.UPDATE_ENTITY:
            handleUpdateNodeCommand(command);
            break;
        case PlanckDBConstants.LOCK_ENTITY:
            handleLockItemCommand(command);
            break;
        case PlanckDBConstants.ADD_ATTRIBUTE:
            handleAddAttributeCommand(command);
            break;
        case PlanckDBConstants.REMOVE_ATTRIBUTE:
            handleRemoveAttributeCommand(command);
            break;
    }
    int entityId = command.getEntityId();
    int? commandSchemaId = command.GetSchemaId();
    // entities changed by another schema become dirty for our transactions
    foreach (Transaction transaction in transactions) {
        if (commandSchemaId != transaction.schemaId()) {
            transaction.getDirtySet().Add(entityId);
        }
    }
}
/**
 * update
 * Marks one child of an atomic command as executed.  When the last child of
 * a root has been consumed, the bookkeeping for that root is dropped and the
 * root command is returned so waiters can be notified.
 * @param command the executed child command
 * @return the root command when it just became complete, otherwise null
 */
public Command update(Command command) {
    try {
        reentrantLock.enter();
        Command rootCommand;
        if (!atomicHelpMap.TryGetValue(command, out rootCommand)) {
            return null; // not part of an atomic command
        }
        List<Command> commands;
        if (!atomicMap.TryGetValue(rootCommand, out commands)) {
            // Defensive: the two maps are out of sync.  The original would
            // have thrown a NullReferenceException on commands.Remove here.
            atomicHelpMap.Remove(command);
            return null;
        }
        commands.Remove(command);
        atomicHelpMap.Remove(command);
        if (commands.Count == 0) {
            // last child consumed -> the atomic command is complete
            atomicMap.Remove(rootCommand);
            return rootCommand;
        }
        return null;
    } finally {
        reentrantLock.exit();
    }
}
/**
 * handleAddChildToParent
 * Adds a child node to a parent node under the given arc name and notifies
 * listeners: arcChanged when the arc existed before, arcAdded otherwise.
 * @param addItemToCollectionCommand
 */
public void handleAddChildToParent(Command addItemToCollectionCommand) {
    int parentId = addItemToCollectionCommand.getEntityId();
    int childId = addItemToCollectionCommand.getChildEntityId();
    byte[] arcName = addItemToCollectionCommand.getArcName();
    Node parent = registry.getNode(parentId);
    // capture the previous arc (if any) before the registry replaces it
    Arc previousArc = parent.getChildArc(arcName);
    Arc newArc = registry.addChildToParent(parentId, childId, arcName);
    bool replaced = previousArc != null;
    foreach (NodeModelChangedListener listener in listenerNodes) {
        if (replaced) {
            listener.arcChanged(previousArc, newArc);
        } else {
            listener.arcAdded(newArc);
        }
    }
}
/**
 * sendMessage
 * Serializes the command and sends it over the multicast socket.
 * @param command the command to broadcast
 */
public void sendMessage(Command command) {
    byte[] payload = command.serialize();
    multicastSocket.Send(payload);
}
/**
 * produceTcp
 * Borrows a TCP conversation from the pool, sends the command over it, and
 * returns the conversation to the pool.
 * @param command the command to send
 */
public void produceTcp(Command command) {
    TcpConnection conversation = tcpConversationPuller.lockConversation();
    try {
        conversation.sendCommand(command);
    } finally {
        // Always hand the conversation back, even when sendCommand throws;
        // the original leaked the locked conversation on failure.
        // NOTE(review): confirm releaseConversation tolerates a conversation
        // that sendCommand already reported as failed.
        tcpConversationPuller.releaseConversation(conversation);
    }
}
/**
 * handleUpdateNodeCommand
 * Remove old attributes and set or override new ones, notifying listeners
 * first about dropped attributes, then about each set/overridden attribute.
 * @param updateItemCommand
 */
public void handleUpdateNodeCommand(Command updateItemCommand) {
    int entityId = updateItemCommand.getEntityId();
    NodeAttribute[] attributes = updateItemCommand.getAttributes();
    Node node = registry.getNode(entityId);
    List<NodeAttribute> removed = registry.updateNode(entityId, attributes);
    // 1) announce attributes the update dropped (new value = null)
    foreach (NodeAttribute removedAttribute in removed) {
        foreach (NodeModelChangedListener listener in listenerNodes) {
            listener.attributeChanged(node, removedAttribute, null);
        }
    }
    // 2) announce each attribute that was set or overridden
    foreach (NodeAttribute attribute in attributes) {
        // NOTE(review): looked up after updateNode, so this reflects the
        // post-update attribute map — matches the original behavior; confirm intent.
        NodeAttribute previous = node.getAttributes().get(attribute.getName());
        foreach (NodeModelChangedListener listener in listenerNodes) {
            if (previous != null) {
                listener.attributeChanged(node, previous, attribute);
            } else {
                listener.attributeAdded(node, attribute);
            }
        }
    }
}
/**
 * handleLockItemCommand
 * Locks or unlocks a node and notifies listeners of the lock change.
 * The lock owner is the command's session when locking, NON_ENTITY_OWNER
 * when unlocking.
 * @param lockCommand
 */
private void handleLockItemCommand(Command lockCommand) {
    int entityId = lockCommand.getEntityId();
    bool entityLock = lockCommand.getLock();
    int ownerId;
    if (entityLock) {
        ownerId = lockCommand.GetSessionId();
    } else {
        ownerId = PlanckDBConstants.NON_ENTITY_OWNER;
    }
    Node node = registry.getNode(entityId);
    registry.lockNode(entityId, entityLock, ownerId);
    foreach (NodeModelChangedListener listener in listenerNodes) {
        listener.lockChanged(node, entityLock);
    }
}
/**
 * consumeTcp
 * Thin pass-through: delegates TCP command consumption to the distribution manager.
 * @param command the command received over TCP
 */
public void consumeTcp(Command command) { tcpDistributionManager.consumeTcp(command); }
/**
 * handleRemoveChildFromParent
 * Removes the arc with the given name from the parent node and notifies
 * listeners with the removed arc.
 * @param removeItemFromCollectionCommand
 */
public void handleRemoveChildFromParent(Command removeItemFromCollectionCommand) {
    int parentId = removeItemFromCollectionCommand.getEntityId();
    byte[] arcName = removeItemFromCollectionCommand.getArcName();
    Arc removedArc = registry.removeChildFromParent(parentId, arcName);
    foreach (NodeModelChangedListener listener in listenerNodes) {
        listener.arcRemoved(removedArc);
    }
}
/**
 * The method does the following:<p>
 * 1.in case of a non-atomic command<p>
 *  - execute the command using the command executor<p>
 *  - notify all waiting threads about the command execution<p>
 * 2.in case of an atomic command<p>
 *  - execute the command using the command executor<p>
 *  - notify all waiting threads only when the current command is the last
 *    element of the atomic command (atomicContainer.update returns the root)
 *
 * @param modelCommand
 * @throws PlanckDBException
 */
public void doJobs(Command modelCommand) {
    // our own transactional commands were already applied locally — skip re-execution
    bool ownTransactionCommand =
        modelCommand.getTransaction() == PlanckDBConstants.TRUE
        && modelCommand.GetSessionId() == sessionMetaData.GetSessionId();
    if (!ownTransactionCommand) {
        commandExecutor.consume(modelCommand);
    }
    if (atomicContainer.isPartOfAtomicCommand(modelCommand)) {
        Command rootCommand = atomicContainer.update(modelCommand);
        if (rootCommand != null) {
            // last child done: wake threads waiting on the root command
            lock (rootCommand) {
                Monitor.PulseAll(rootCommand);
            }
        }
    } else {
        lock (modelCommand) {
            Monitor.PulseAll(modelCommand);
        }
    }
    log.info("message done version : " + modelCommand.getVersion() + " id " + modelCommand.getEntityId());
}
/**
 * this is the heart of the storage,<p>
 * It controls all the incoming commands.<p>
 * The method does the following<p>
 * 1. validate the command<p>
 * 2. update the version number and the conflict number (if needed)<p>
 * 3. push the command to the command queue<p>
 */
public void consume(Command command) {
    // update schema and coreManagerKey in command in case that the schema or
    // coreManagerKey fields in the command are null.
    // BUGFIX: GetSchemaId() is nullable; the original "GetSchemaId() < 0" test
    // is false for null (lifted comparison), so a null schema was never filled
    // in even though the comment promised it.
    int? schemaId = command.GetSchemaId();
    if (schemaId == null || schemaId < 0) {
        command.setSchemaId(getSessionMetaData().getSchemaId());
    }
    if (command.GetCoreManagerKey() == null) {
        command.setCoreManagerKey(getCoreManager().getKey());
    }
    // you do not have to handle your own multicast messages —
    // you have already handled them locally
    NetworkProtocolType type = command.getNetworkProtocolType();
    if (type != null && isCast(type)
            && command.GetCoreManagerKey().Equals(getCoreManager().getKey())
            && sessionMetaData.GetSessionId() == command.GetSessionId()) {
        return;
    }
    // TODO validate message
    if (command.isModeCommand() || command.isAtomicModelCommand()) {
        // model change commands: set version number or distribute
        if (command.getVersion() < 0) {
            distributionManager.produceTcp(command);
            int version = command.getVersion();
            // return if command is lock or something was wrong
            if (version < 0 || command.isNotSucceed()) {
                return;
            }
        }
        if (command.isAtomicModelCommand()) {
            // queue every child of the atomic command individually
            List<Command> commands = command.getCommands();
            atomicContainer.register(command, commands);
            foreach (Command newCommand in commands) {
                commandQueue.pushCommand(newCommand);
            }
        } else {
            commandQueue.pushCommand(command);
        }
    } else {
        Int32 commandType = command.getCommandType();
        if (commandType == PlanckDBConstants.READ_LOCK_COMMAND) {
            if (!command.GetCoreManagerKey().Equals(coreManager.getKey())) {
                // read-lock issued by another core manager: apply it locally
                List<Command> commands = command.getCommands();
                bool entityLock = command.isLocked();
                foreach (Command newCommand in commands) {
                    int entityId = newCommand.getEntityId();
                    int ownerId = newCommand.getOwnerId();
                    if (entityLock) {
                        registry.lockEntity(entityId, true, ownerId);
                    } else {
                        registry.lockEntity(entityId, false, PlanckDBConstants.NON_ENTITY_OWNER);
                    }
                }
            } else {
                // our own read-lock command: forward it to the server
                distributionManager.produceTcp(command);
            }
        }
    }
}
/**
 * distribTcp
 * Not implemented: TCP distribution is not supported by this component.
 * @param command ignored
 * @throws NotImplementedException always
 */
public void distribTcp(Command command) { throw new NotImplementedException(); }
/**
 * sendCommand
 * Serializes the command, streams it over TCP in MAX_TCP_MESSAGE_SIZE chunks,
 * then reads the framed response (SIZE_OF_INT length prefix that counts
 * itself, followed by the payload) and copies the result back into the
 * original command via resultCommand.fill(command).
 * @param command the command to send; filled from the server's response
 * @throws PlanckDBException when the TCP connection fails with an IOException
 */
public void sendCommand(Command command) {
    try {
        var stream = socket.GetStream();
        // --- write phase: send the serialized command in bounded chunks ---
        byte[] commandBuffer = command.serialize();
        int amount = 0;
        while (amount < commandBuffer.Length) {
            int messageSize = Math.Min(PlanckDBConstants.MAX_TCP_MESSAGE_SIZE, commandBuffer.Length - amount);
            stream.Write(commandBuffer, amount, messageSize);
            stream.Flush();
            amount += messageSize;
        }
        // --- read phase: fixed-size length prefix ---
        amount = 0;
        while (amount < PlanckDBConstants.SIZE_OF_INT) {
            // Use the named constant instead of a literal 4, and treat 0 as EOF:
            // .NET Stream.Read returns 0 (never a negative value) on a closed
            // connection, so the original "read < 0" check could loop forever.
            int read = stream.Read(buffer, amount, PlanckDBConstants.SIZE_OF_INT - amount);
            if (read <= 0) {
                tcpConversationHolder.handleTcpConnectionFailure(this);
                return;
            }
            amount += read;
        }
        int commandSize = SerializationUtils.byteArrayToInt(buffer, 0);
        // --- read phase: response body (commandSize includes the prefix) ---
        amount = 0;
        commandBuffer = new byte[commandSize - PlanckDBConstants.SIZE_OF_INT];
        while (amount < commandBuffer.Length) {
            int read = stream.Read(commandBuffer, amount, commandBuffer.Length - amount);
            if (read <= 0) {
                // EOF mid-body: report the broken connection instead of spinning
                // (the original loop had no EOF check here at all)
                tcpConversationHolder.handleTcpConnectionFailure(this);
                return;
            }
            amount += read;
        }
        Command resultCommand = AbstractCommand.deSerialize(commandBuilder, commandBuffer, 0, commandBuffer.Length);
        resultCommand.fill(command);
    } catch (IOException e) {
        throw new PlanckDBException(CommandStatus.unsupported, "TCP connection failure due to IOException : " + e.Message);
    }
}
/**
 * consume
 * Thin pass-through: hands the command to the core manager for processing.
 * @param command the command to process
 */
private void consume(Command command) { coreManager.consume(command); }
/**
 * handleNewItemCommand
 * Creates a new node in the registry, then notifies listeners about the new
 * node and about each of its initial attributes (old value null).
 * @param itemCommand
 * @throws PlanckDBException
 */
public void handleNewItemCommand(Command itemCommand) {
    int entityId = itemCommand.getEntityId();
    NodeAttribute[] attributes = itemCommand.getAttributes();
    bool entityLock = itemCommand.getLock();
    int ownerId;
    if (entityLock) {
        ownerId = itemCommand.GetSessionId();
    } else {
        ownerId = PlanckDBConstants.NON_ENTITY_OWNER;
    }
    registry.createNewNode(entityId, entityLock, sessionMetaData.GetSessionId(), ownerId, attributes);
    Node node = registry.getNode(entityId);
    foreach (NodeModelChangedListener listener in listenerNodes) {
        listener.newItem(node);
    }
    // announce every initial attribute; a fresh node has no previous values
    foreach (NodeModelChangedListener listener in listenerNodes) {
        foreach (NodeAttribute attribute in attributes) {
            listener.attributeChanged(node, null, attribute);
        }
    }
}
/**
 * consume
 * Routes a command to every core registered for its schema.  Composite
 * commands are unpacked and each child routed by its own schema id.
 * @param command the command (possibly composite) to route
 */
public void consume(Command command) {
    if (command.isComposite()) {
        foreach (Command child in command.getCommands()) {
            dispatchToCores((int)child.GetSchemaId(), child);
        }
    }
    if (command.isCoreCommand()) {
        dispatchToCores((int)command.GetSchemaId(), command);
    }
}

// Delivers the command to every core bound to the schema; unknown schemas are
// ignored.  TryGetValue fixes the original core-command path, whose indexer
// coreMap[schemaId] threw KeyNotFoundException for an unregistered schema and
// made its "list != null" check unreachable.
private void dispatchToCores(int schemaId, Command command) {
    List<Core> list;
    if (coreMap.TryGetValue(schemaId, out list) && list != null) {
        foreach (Core core in list) {
            core.consume(command);
        }
    }
}
/**
 * handleRemoveAttributeCommand
 * Removes the given attributes from a node and notifies listeners, passing
 * the removed attribute as the old value and null as the new one.
 * @param updateItemCommand
 */
public void handleRemoveAttributeCommand(Command updateItemCommand) {
    int entityId = updateItemCommand.getEntityId();
    NodeAttribute[] removedAttributes = updateItemCommand.getAttributes();
    registry.removeAttributes(entityId, removedAttributes);
    Node node = registry.getNode(entityId);
    foreach (NodeModelChangedListener listener in listenerNodes) {
        foreach (NodeAttribute removed in removedAttributes) {
            listener.attributeChanged(node, removed, null);
        }
    }
}
/**
 * handleAddAttributeCommand
 * Adds or overrides attributes on a node and notifies listeners:
 * attributeChanged when a previous value exists, attributeAdded otherwise.
 * @param addAttributeCommand
 */
public void handleAddAttributeCommand(Command addAttributeCommand) {
    int entityId = addAttributeCommand.getEntityId();
    NodeAttribute[] attributes = addAttributeCommand.getAttributes();
    registry.addAttributes(entityId, attributes);
    Node node = registry.getNode(entityId);
    foreach (NodeAttribute attribute in attributes) {
        // NOTE(review): looked up after addAttributes, so this reflects the
        // post-update attribute map — matches the original behavior; confirm intent.
        NodeAttribute previous = node.getAttributes().get(attribute.getName());
        foreach (NodeModelChangedListener listener in listenerNodes) {
            if (previous != null) {
                listener.attributeChanged(node, previous, attribute);
            } else {
                listener.attributeAdded(node, attribute);
            }
        }
    }
}
/**
 * handleRemoveItemCommand
 * Removes a node from memory and notifies listeners with the removed node.
 * NOTE : after removing the node, it becomes a phantom.
 * @param removeItemCommand
 */
public void handleRemoveItemCommand(Command removeItemCommand) {
    int entityId = removeItemCommand.getEntityId();
    // grab the node before removal so listeners can still see it
    Node removedNode = registry.getNode(entityId);
    registry.remove(entityId);
    foreach (NodeModelChangedListener listener in listenerNodes) {
        listener.removeItem(removedNode);
    }
}
/**
 * consumeUdp
 * Marks the command (and, for composites, every child command) as unicast,
 * re-attaches the child list, and consumes the command.  Failures are logged
 * rather than propagated.
 * @param command the command received over UDP
 */
public void consumeUdp(Command command) {
    command.setNetworkProtocolType(NetworkProtocolType.unicast);
    if (command.isComposite()) {
        List<Command> commands = command.getCommands();
        foreach (Command newCommand in commands) {
            newCommand.setNetworkProtocolType(NetworkProtocolType.unicast);
        }
        command.Push(PlanckDBConstants.COMMANDS, PlanckDBConstants.COMMAND_LIST, commands);
    }
    // (the original set the command's protocol type a second time here —
    // redundant with the first line, removed)
    try {
        consume(command);
    } catch (Exception e) {
        log.error(e.Message, e);
    }
}
/**
 * produce
 * Transfer the user command to the core which will eventually send the command to the server
 * handle the command status received by the server.
 * For model commands, consumes under the command's monitor and then waits to
 * be pulsed (see doJobs) before re-checking the final status.
 * @param command
 * @param <T>
 * @throws PlanckDBException
 * @throws EntityLockException
 */
public void produce(Command command) {
    if(command.isModeCommand()){
        //noinspection SynchronizationOnLocalVariableOrMethodParameter
        lock(command){
            // consume while holding the command's monitor so the executing
            // thread cannot pulse the command before we reach Monitor.Wait
            getCore().consume(command);
            CommandStatus status = command.getStatus();
            if( ! CommandStatus.success.Equals(status)){
                if(CommandStatus.notNodeOwner.Equals(status)){
                    throw new EntityLockException(status,status.getMessage());
                }else{
                    throw new PlanckDBException(status,status.getMessage());
                }
            }
            try {
                // block until doJobs pulses this command (timeout in milliseconds)
                Monitor.Wait(command,120000000);
            } catch (Exception e) {
                // status is CommandStatus.success here (checked above)
                throw new PlanckDBException(status,"Fatal exception in command synchronization process ( while waiting for awake notification, an InterruptedException occurred. ");
            }
        }
    }else{
        getCore().consume(command);
    }
    // re-read the status: it may have been updated while we waited
    CommandStatus commandStatus =command.getStatus();
    if (!CommandStatus.success.Equals(commandStatus)) {
        throw new PlanckDBException(commandStatus, commandStatus.getMessage() + " schema : " + sessionMetaData.getSchemaName() + " user " + sessionMetaData.getUserName());
    }
}
/**
 * consumeUnicast
 * Not implemented: unicast consumption is not supported by this component.
 * @param command ignored
 * @throws NotImplementedException always
 */
public void consumeUnicast(Command command) { throw new NotImplementedException(); }
/**
 * revertCommand
 * Builds the inverse of the given model-change command, so that applying the
 * result undoes the original operation (create->delete, delete->create,
 * add-child->remove-child, and so on).
 * @param command the command to invert
 * @return the inverse command
 * @throws PlanckDBException when the command type has no known inverse
 */
public Command revertCommand(Command command) {
    int commandType = command.getCommandType();
    Int32 entityId = command.getEntityId();
    Int32 childEntityId = command.getChildEntityId();
    Int32 schemaId = (Int32)command.GetSchemaId();
    Int32 coreManagerId = command.GetCoreManagerKey();
    Int32 sessionId = command.GetSessionId();
    bool lockEntity = command.getLock();
    bool oldLock = command.getOldLock();
    NodeAttribute[] oldAttributes = command.getOldAttributes();
    NodeAttribute[] attributes = command.getAttributes();
    byte[] arcName = command.getArcName();
    long lockTimeout = command.getLockTimeOut();
    switch (commandType) {
        case PlanckDBConstants.CREATE_NEW_ENTITY: {
            // create -> delete
            return buildDeleteNode(entityId, lockEntity, schemaId, coreManagerId, sessionId, lockTimeout, attributes);
        }
        case PlanckDBConstants.DELETE_ENTITY: {
            // delete -> re-create with the pre-delete lock state and attributes,
            // pinned to the original entity id
            Command newCommand = buildCreateNode(oldLock, schemaId, coreManagerId, sessionId, lockTimeout, oldAttributes);
            newCommand.Push(PlanckDBConstants.ENTITY_ID, PlanckDBConstants.INTEGER, entityId);
            return newCommand;
        }
        case PlanckDBConstants.ADD_CHILD_ENTITY_TO_PARENT_ENTITY: {
            return buildRemoveChildFromParentNode(entityId, arcName, childEntityId, schemaId, coreManagerId, sessionId, lockTimeout);
        }
        case PlanckDBConstants.REMOVE_CHILD_ENTITY_FROM_PARENT_ENTITY: {
            return buildAddChildToParentNode(entityId, childEntityId, arcName, schemaId, coreManagerId, sessionId, lockTimeout);
        }
        case PlanckDBConstants.UPDATE_ENTITY: {
            // NOTE(review): builds with the NEW attributes and attaches the old
            // ones under OLD_ATTRIBUTES — matches the original; confirm the
            // consumer swaps them when applying the revert.
            Command newCommand = buildUpdateNode(entityId, schemaId, coreManagerId, sessionId, lockTimeout, attributes);
            newCommand.Push(PlanckDBConstants.OLD_ATTRIBUTES, PlanckDBConstants.ATTRIBUTE_MAP, oldAttributes);
            return newCommand;
        }
        case PlanckDBConstants.ADD_ATTRIBUTE: {
            return buildRemoveAttributes(entityId, schemaId, coreManagerId, sessionId, lockTimeout, attributes);
        }
        case PlanckDBConstants.REMOVE_ATTRIBUTE: {
            return buildAddAttributes(entityId, schemaId, coreManagerId, sessionId, lockTimeout, oldAttributes);
        }
    }
    // fixed the garbled original message ("unsupported state the should not reach ...")
    throw new PlanckDBException("unsupported command type: execution should not reach this point");
}
/**
 * produceDatagram
 * Not implemented: datagram production is not supported by this component.
 * @param command ignored
 * @throws NotImplementedException always
 */
public void produceDatagram(Command command) { throw new NotImplementedException(); }
/**
 * addMessage
 * Not implemented: message buffering is not supported by this component.
 * @param command ignored
 * @throws NotImplementedException always
 */
internal void addMessage(Command command) { throw new NotImplementedException(); }
/**
 * consumeBroadcastTcp
 * Broadcast TCP commands are handled exactly like point-to-point TCP commands.
 * @param command the command received over broadcast TCP
 */
public void consumeBroadcastTcp(Command command) { consumeTcp(command); }