Example #1
 /**
  * addArcToNode method;
  * Add an arc to a node.
  * Using this method ensures that the server has accepted
  * the arc addition, and it also ensures that all the clients will
  * receive the change in the same sequence in which the server did.
  * However, it is impossible to predict when the change is accepted by the remote
  * clients, because of network limits.
  * @param parent
  * @param child
  * @param arcName
  * @throws PlanckDBException
  * @throws TransactionException
  */
 public void addArcToNode(Node parent, Node child, byte[] arcName)
 {
     if (arcName == null) {
         throw new PlanckDBException(CommandStatus.arcNameCanNotBeNull, "Unsupported state: arc name can't be null");
     }
     Command command = commandBuilder.buildAddChildToParentNode(parent.getId(), child.getId(), arcName, sessionMetaData.getSchemaId(), core.getCoreManager().getKey(), sessionMetaData.getSessionId(), lockTimeout);
     commandExecutor.produce(command);
 }
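A minimal usage sketch of addArcToNode; the schemaAdapter field, the linkNodes helper name, and the "owns" arc name are illustrative assumptions, not part of the original example:

 // Hypothetical caller (assumes schemaAdapter exposes the addArcToNode method above).
 void linkNodes(Node parent, Node child) throws PlanckDBException, TransactionException {
     byte[] arcName = "owns".getBytes();   // arc names are plain byte arrays
     schemaAdapter.addArcToNode(parent, child, arcName);
     // returns only after the server has accepted the arc addition; remote
     // clients will apply it later, in the same order the server did
 }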
Example #2
 public void addAttributes(Node node, NodeAttribute... attributes)
 {
     if (transaction != null) {
         // inside an open transaction, delegate the change to it
         transaction.addAttributes(node, attributes);
     } else {
         validateSchemaEntity(node);
         int schemaId = sessionMetaData.getSchemaId();
         int coreManagerId = core.getCoreManager().getKey();
         int sessionId = core.getSessionMetaData().getSessionId();
         Command command = commandBuilder.buildAddAttributes(node.getId(), schemaId, coreManagerId, sessionId, schemaAdapter.getLockTimeout(), attributes);
         List<NodeAttribute> values = node.getAttributes().values();
         command.push(PlanckDBConstants.OLD_ATTRIBUTES, PlanckDBConstants.ATTRIBUTE_MAP, values.toArray(new NodeAttribute[0]));
         command.push(PlanckDBConstants.TRANSACTION, PlanckDBConstants.INTEGER, PlanckDBConstants.TRUE);
         // apply the change locally and keep the command for a later commit or rollback
         commandExecutor.consume(command);
         commandsList.add(command);
     }
 }
Example #3
 /**
  * addAttributes method;
  * Add attributes
  * Using this method ensures that the server has accepted
  * the attribute addition, and it also ensures that all the clients will
  * receive the change in the same sequence in which the server did.
  * However, it is impossible to predict when the change is accepted by the remote
  * clients, because of network limits.
  * @param node
  * @param attributes
  * @throws AbstractPlanckDBException
  */
 public void addAttributes(Node node, NodeAttribute[] attributes)
 {
     List<NodeAttribute> values = node.getAttributes().values();
     NodeAttribute[] oldAttributes = values.toArray(new NodeAttribute[0]);
     Command command = commandBuilder.buildAddAttributes(node.getId(), sessionMetaData.getSchemaId(), core.getCoreManager().getKey(), sessionMetaData.getSessionId(), lockTimeout, attributes);
     command.push(PlanckDBConstants.OLD_ATTRIBUTES, PlanckDBConstants.ATTRIBUTE_MAP, oldAttributes);
     commandExecutor.produce(command);
 }
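As an illustration of the blocking addAttributes variant above, a sketch that copies all attributes from one node to another; the copyAttributes helper name and the schemaAdapter field are assumptions:

 // Hypothetical helper built only from calls shown in these examples.
 void copyAttributes(Node source, Node target) throws AbstractPlanckDBException {
     NodeAttribute[] attrs = source.getAttributes().values().toArray(new NodeAttribute[0]);
     schemaAdapter.addAttributes(target, attrs);
     // produce(...) inside addAttributes blocks until the server accepts the change
 }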
Example #4
 /**
  * removeArcFromNode method
  * Remove an arc from a node
  * Using this method ensures that the server has accepted
  * the arc removal, and it also ensures that all the clients will
  * receive the change in the same sequence in which the server did.
  * However, it is impossible to predict when the change is accepted by the remote
  * clients, because of network limits.
  * @param parent
  * @param arcName
  * @throws AbstractPlanckDBException
  */
 public void removeArcFromNode(Node parent, byte[] arcName)
 {
     Arc arc = parent.getChildArc(arcName);
     if (arc != null) {
         Command command = commandBuilder.buildRemoveChildFromParentNode(parent.getId(), arcName, arc.getChild().getId(), sessionMetaData.getSchemaId(), core.getCoreManager().getKey(), sessionMetaData.getSessionId(), lockTimeout);
         commandExecutor.produce(command);
     } else {
         throw new PlanckDBException(CommandStatus.noChildParentReference, CommandStatus.noChildParentReference.getMessage());
     }
 }
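A short sketch of calling removeArcFromNode when the arc may already be gone; the unlinkQuietly helper name and the schemaAdapter field are assumed for illustration:

 // Hypothetical caller that tolerates a missing arc.
 void unlinkQuietly(Node parent, byte[] arcName) {
     try {
         schemaAdapter.removeArcFromNode(parent, arcName);
     } catch (PlanckDBException e) {
         // raised with CommandStatus.noChildParentReference when the parent
         // has no child arc with this name (see the else branch above)
     }
 }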
Example #5
 /**
  * deleteNode method;
  * Delete a node
  * Deleting a node by this method ensures that the server has accepted
  * the node deletion, and it also ensures that all the clients will
  * receive the change in the same sequence in which the server did.
  * However, it is impossible to predict when the change is accepted by the remote
  * clients, because of network limits.
  * @param node
  * @throws PlanckDBException
  * @throws TransactionException
  */
 public void deleteNode(Node node)
 {
     List<NodeAttribute> values = node.getAttributes().values();
     NodeAttribute[] oldAttributes = values.toArray(new NodeAttribute[0]);
     Command command = commandBuilder.buildDeleteNode(node.getId(), node.getLock(), sessionMetaData.getSchemaId(), core.getCoreManager().getKey(), sessionMetaData.getSessionId(), lockTimeout, oldAttributes);
     commandExecutor.produce(command);
 }
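A sketch combining the two removal operations above: detach a child arc and then delete the detached node; the helper name and the schemaAdapter field are assumptions:

 // Hypothetical workflow using only methods shown in these examples.
 void removeArcAndDeleteChild(Node parent, byte[] arcName) throws AbstractPlanckDBException {
     Arc arc = parent.getChildArc(arcName);   // same lookup removeArcFromNode performs
     if (arc != null) {
         schemaAdapter.removeArcFromNode(parent, arcName);
         schemaAdapter.deleteNode(arc.getChild());
     }
 }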
Example #6
 /**
  * addChildToParent method
  * Like addArcToNode in the non-transactional schemaAdapter,
  * but unlike the non-transactional schemaAdapter, this one commits its changes directly on the entities.
  * It also accumulates the commands in order to roll back the changes on the client or commit them on the server.
  * @param parent
  * @param child
  * @param arcName
  * @throws PlanckDBException
  * @throws TransactionException
  * @throws EntityLockException
  */
 public void addChildToParent(Node parent, Node child, byte[] arcName)
 {
     if (transaction != null) {
         transaction.addChildToParent(parent, child, arcName);
     } else {
         validateSchemaEntity(parent);
         validateSchemaEntity(child);
         int schemaId = sessionMetaData.getSchemaId();
         int coreManagerId = core.getCoreManager().getKey();
         int sessionId = core.getSessionMetaData().getSessionId();
         Command command = commandBuilder.buildAddChildToParentNode(parent.getId(), child.getId(), arcName, schemaId, coreManagerId, sessionId, schemaAdapter.getLockTimeout());
         command.push(PlanckDBConstants.TRANSACTION, PlanckDBConstants.INTEGER, PlanckDBConstants.TRUE);
         commandExecutor.consume(command);
         commandsList.add(command);
     }
 }
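For contrast with Example #1, a sketch of how the transactional path might be driven. Only the "apply locally, accumulate, then commit or roll back" idea comes from the comment above; the transactionalAdapter name, its commit() method, and the "owns" arc name are assumptions:

 // Hypothetical transactional usage; commit() is an assumed method name.
 void linkInTransaction(Node parent, Node child) throws AbstractPlanckDBException {
     transactionalAdapter.addChildToParent(parent, child, "owns".getBytes());
     // the arc is already visible on the local entities (consume),
     // and the command has been queued for a later commit or rollback
     transactionalAdapter.commit();   // assumed: pushes the accumulated commands to the server
 }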
Example #7
 private boolean isDirty(Node node)
 {
     return dirtySet.contains(node.getId());
 }
Example #8
 public void removeChildFromParent(Node parent, byte[] arcName)
 {
     if (transaction != null) {
         transaction.removeChildFromParent(parent, arcName);
     } else {
         validateSchemaEntity(parent);
         Arc arc = parent.getChildArc(arcName);
         if (arc != null) {
             int schemaId = sessionMetaData.getSchemaId();
             int coreManagerId = core.getCoreManager().getKey();
             int sessionId = core.getSessionMetaData().getSessionId();
             Command command = commandBuilder.buildRemoveChildFromParentNode(parent.getId(), arc.getName(), arc.getChild().getId(), schemaId, coreManagerId, sessionId, schemaAdapter.getLockTimeout());
             command.push(PlanckDBConstants.TRANSACTION, PlanckDBConstants.INTEGER, PlanckDBConstants.TRUE);
             commandExecutor.consume(command);
             commandsList.add(command);
         } else {
             throw new PlanckDBException(CommandStatus.noChildParentReference, CommandStatus.noChildParentReference.getMessage());
         }
     }
 }