Example #1
 public void AddArcToNode(Node parent, Node child, String arcName)
 {
     validate(parent,child);
     Encoding encoding = DBProperties.getEncoding();
     byte[] array=encoding.GetBytes(arcName);
     AddArcToNode(parent,child,array);
 }
Example #2
 /**
  * addArcToNode method;
  * Add arc to node.
  * Using this method ensures that the server has accepted the arc
  * addition, and it also ensures that all clients will receive the
  * change in the same order in which the server accepted it.
  * However, the time at which remote clients apply the change cannot
  * be predicted because of network limits.
  * @param parent
  * @param child
  * @param arcName
  * @throws PlanckDBException
  * @throws TransactionException
  */
 public void AddArcToNode(Node parent, Node child, byte[] arcName)
 {
     if(arcName==null){
        throw new PlanckDBException(CommandStatus.arcNameCanNotBeNull,"Unsupported state arc name can't be null");
     }
     Command command = commandBuilder.buildAddChildToParentNode(parent.getId(), child.getId(),arcName,sessionMetaData.getSchemaId(), core.getCoreManager().getKey(),sessionMetaData.GetSessionId(),lockTimeout);
     commandExecutor.produce(command);
 }
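A minimal usage sketch, not taken from the listing: the String overload in Example #1 simply encodes the arc name with the configured encoding and delegates to this byte[] overload, so the two calls below are equivalent. The names "schema", "parent" and "child" are illustrative placeholders for objects exposing the methods shown above.

     // Hypothetical caller: schema, parent and child are assumed to already exist.
     schema.AddArcToNode(parent, child, "contains");
     // Equivalent call through the byte[] overload of Example #2.
     schema.AddArcToNode(parent, child, DBProperties.getEncoding().GetBytes("contains"));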
Example #3
 public void AddAttributes(Node node, params NodeAttribute[] attributes)
 {
     validate(node);
     try{
         reentrantLock.enter();
         if (transaction!=null){
             transaction.addAttributes(node, attributes);
         }else{
             schemaAdapter.AddAttributes(node, attributes);
         }
     }finally {
         reentrantLock.exit();
     }
 }
Example #4
 /**
  * addArcToNode method
  * Like addArcToNode in the non-transactional schemaAdapter,
  * but unlike that adapter, this one commits its changes directly on the entities.
  * It also accumulates the command so that the changes can be rolled back on the client or submitted to the server.
  * @param parent
  * @param child
  * @param arcName
  * @throws PlanckDBException
  * @throws TransactionException
  */
 public void AddArcToNode(Node parent, Node child, byte[] arcName)
 {
     validate(parent,child);
     try{
         reentrantLock.enter();
         if (transaction!=null){
             transaction.addChildToParent(parent, child, arcName);
         }else{
             schemaAdapter.AddArcToNode(parent, child, arcName);
         }
     }finally {
         reentrantLock.exit();
     }
 }
Example #5
 public void addAttributes(Node node,params NodeAttribute[] attributes)
 {
     if(transaction!=null){
         transaction.addAttributes(node, attributes);
     }else{
         validateSchemaEntity(node);
         int schemaId=sessionMetaData.getSchemaId();
         int coreManagerId=core.getCoreManager().getKey();
         int sessionId=core.getSessionMetaData().GetSessionId();
         Command command = commandBuilder.buildAddAttributes(node.getId(), schemaId, coreManagerId, sessionId, schemaAdapter.getLockTimeout(), attributes);
         List<NodeAttribute> values = node.getAttributes().values();
         command.Push(PlanckDBConstants.OLD_ATTRIBUTES, PlanckDBConstants.ATTRIBUTE_MAP, values.ToArray());
         command.Push(PlanckDBConstants.TRANSACTION, PlanckDBConstants.INTEGER, PlanckDBConstants.TRUE);
         commandExecutor.consume(command);
         commandsList.Add(command);
     }
 }
Example #6
 /**
  * addAttributes method;
  * Add attributes
  * Using this method ensures that the server has accepted the
  * attribute addition, and it also ensures that all clients will
  * receive the change in the same order in which the server accepted it.
  * However, the time at which remote clients apply the change cannot
  * be predicted because of network limits.
  * @param node
  * @param attributes
  * @throws AbstractPlanckDBException
  */
 public void AddAttributes(Node node, NodeAttribute[] attributes)
 {
     List<NodeAttribute> values = node.getAttributes().values();
     NodeAttribute[] oldAttributes = values.ToArray();
     Command command = commandBuilder.buildAddAttributes(node.getId(), sessionMetaData.getSchemaId(), core.getCoreManager().getKey(), sessionMetaData.GetSessionId(), lockTimeout ,attributes);
     command.Push(PlanckDBConstants.OLD_ATTRIBUTES,PlanckDBConstants.ATTRIBUTE_MAP,oldAttributes);
     commandExecutor.produce(command);
 }
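A brief usage sketch, not taken from the listing, assuming a facade object "schema" that exposes the params overload from Example #3 and NodeAttribute instances obtained elsewhere (their construction is not shown here):

     // Hypothetical caller: node, colourAttribute and weightAttribute are assumed to exist.
     // The params overload forwards to the transactional or non-transactional path under the lock.
     schema.AddAttributes(node, colourAttribute, weightAttribute);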
Example #7
 /**
  * removeArc method
  * Remove arc from node
  * Using this method ensures that the server has accepted the arc
  * removal, and it also ensures that all clients will receive the
  * change in the same order in which the server accepted it.
  * However, the time at which remote clients apply the change cannot
  * be predicted because of network limits.
  * @param parent
  * @param arcName
  * @throws AbstractPlanckDBException
  */
 public void RemoveArcFromNode(Node parent, byte[] arcName)
 {
     Arc arc=parent.getChildArc(arcName);
     if(arc!=null){
         Command command = commandBuilder.buildRemoveChildFromParentNode(parent.getId(),arcName, arc.getChild().getId(),sessionMetaData.getSchemaId(), core.getCoreManager().getKey(),sessionMetaData.GetSessionId(),lockTimeout);
         commandExecutor.produce(command);
     }else{
         throw new PlanckDBException(CommandStatus.noChildParentReference,CommandStatus.noChildParentReference.getMessage());
     }
 }
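Example #7 throws a PlanckDBException with status noChildParentReference when the parent has no arc with the given name. A hypothetical defensive caller (illustrative names, not from the listing) can check first with getChildArc, the same lookup the method itself uses:

     // Hypothetical caller: schemaAdapter and parent are assumed to exist.
     byte[] arcName = DBProperties.getEncoding().GetBytes("contains");
     if (parent.getChildArc(arcName) != null) {
         schemaAdapter.RemoveArcFromNode(parent, arcName);
     }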
Example #8
 public void removeChildFromParent(Node parent, byte[] arcName)
 {
     if(transaction!=null){
         transaction.removeChildFromParent(parent, arcName);
     }else{
         validateSchemaEntity(parent);
         Arc arc = parent.getChildArc(arcName);
         if(arc!=null){
             int schemaId=sessionMetaData.getSchemaId();
             int coreManagerId=core.getCoreManager().getKey();
             int sessionId=core.getSessionMetaData().GetSessionId();
             Command command = commandBuilder.buildRemoveChildFromParentNode(parent.getId(),arc.getName(), arc.getChild().getId(), schemaId,coreManagerId, sessionId, schemaAdapter.getLockTimeout());
             command.Push(PlanckDBConstants.TRANSACTION, PlanckDBConstants.INTEGER, PlanckDBConstants.TRUE);
             commandExecutor.consume(command);
             commandsList.Add(command);
         }else{
             throw new PlanckDBException(CommandStatus.noChildParentReference,CommandStatus.noChildParentReference.getMessage());
         }
     }
 }
Example #9
 public void RemoveArcFromNode(Node parent, byte[] arcName)
 {
     validate(parent);
     try{
         reentrantLock.enter();
         if (transaction!=null){
             transaction.removeChildFromParent(parent, arcName);
         }else{
             schemaAdapter.RemoveArcFromNode(parent, arcName);
         }
     }finally {
         reentrantLock.exit();
     }
 }
Example #10
 private static void collectSuccessors(Node node, HashSet<Node> result)
 {
     result.Add(node);
     foreach (Arc arc in node.getChildrenArcs()) {
         collectSuccessors(arc.getChild(), result);
     }
 }
Example #11
 private static void collectAncestors(Node node, HashSet<Node> result)
 {
     result.Add(node);
     foreach (Arc arc in node.GetParentsArcs()) {
         collectAncestors(arc.getParent(), result);
     }
 }
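The two recursive collectors above can be wrapped in small convenience helpers. This sketch is not part of the listing and only reuses the methods shown in Examples #10 and #11:

 private static HashSet<Node> getSuccessors(Node root)
 {
     // Hypothetical wrapper: seeds the set and delegates to collectSuccessors (Example #10).
     HashSet<Node> result = new HashSet<Node>();
     collectSuccessors(root, result);
     return result;
 }

 private static HashSet<Node> getAncestors(Node node)
 {
     // Hypothetical wrapper: seeds the set and delegates to collectAncestors (Example #11).
     HashSet<Node> result = new HashSet<Node>();
     collectAncestors(node, result);
     return result;
 }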
Example #12
 /**
  * addArcToNode method
  * Like addArcToNode in the non-transactional schemaAdapter,
  * but unlike that adapter, this one commits its changes directly on the entities.
  * It also accumulates the command so that the changes can be rolled back on the client or committed on the server.
  * @param parent
  * @param child
  * @param arcName
  * @throws PlanckDBException
  * @throws TransactionException
  * @throws EntityLockException
  */
 public void addChildToParent(Node parent, Node child,byte[] arcName)
 {
     if(transaction!=null){
         transaction.addChildToParent(parent, child, arcName);
     }else{
         validateSchemaEntity(parent);
         validateSchemaEntity(child);
         int schemaId=sessionMetaData.getSchemaId();
         int coreManagerId=core.getCoreManager().getKey();
         int sessionId=core.getSessionMetaData().GetSessionId();
         Command command = commandBuilder.buildAddChildToParentNode(parent.getId(), child.getId(), arcName, schemaId, coreManagerId, sessionId, schemaAdapter.getLockTimeout());
         command.Push(PlanckDBConstants.TRANSACTION, PlanckDBConstants.INTEGER, PlanckDBConstants.TRUE);
         commandExecutor.consume(command);
         commandsList.Add(command);
     }
 }
Example #13
 private void validateSchemaEntity(Node node)
 {
     int sessionId=core.getSessionMetaData().GetSessionId();
     if(node.getSessionId()!=sessionId){
         throw new PlanckDBException(CommandStatus.canNotMixEntitiesFromTwoDifferentSchemas, CommandStatus.canNotMixEntitiesFromTwoDifferentSchemas.getMessage());
     }
     if(isDirty(node)){
         throw new PlanckDBException(CommandStatus.entityHasChangedExternally, CommandStatus.entityHasChangedExternally.getMessage());
     }
 }
Example #14
 private bool isDirty(Node node)
 {
     return dirtySet.Contains(node.getId());
 }
Example #15
 /**
  * rollback method
  * Reverts the accumulated commands in reverse order in order to undo the changes made in the transaction
  * @throws PlanckDBException
  * @throws TransactionException
  * @throws EntityLockException
  */
 public Transaction rollback()
 {
     if(transaction!=null){
         transaction=transaction.rollback();
         return this;
     }else{
         try{
             int listSize=commandsList.Count;
             for (int i = listSize-1; i >=0; i--) {
                 Command command = commandsList.ElementAt(i);
                 Command revertedCommand= commandBuilder.revertCommand(command);
                 commandExecutor.consume(revertedCommand);
             }
             List<Node> persistedNodes=new List<Node>();
             foreach (Int32 nodeId in transactionEntities) {
                 if(nodeId>=PlanckDBConstants.MIN_ENTITY_ID){
                     Node node = registry.getNode(nodeId);
                     persistedNodes.Add(node);
                 }
             }
             Node[] nodesToUnlock = new Node[persistedNodes.Count];
             for (int j = 0; j < nodesToUnlock.Length; j++) {
                 Node node = persistedNodes.ElementAt(j);
                 nodesToUnlock[j]=node;
             }
             schemaAdapter.LockNodes(false, false, nodesToUnlock);
             return null;
         }finally {
             commandExecutor.unSubscribeTransaction(this);
             transactionEntities.Clear();
             commandsList.Clear();
             dirtySet.Clear();
         }
     }
 }
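A rough lifecycle sketch with hypothetical names, not taken from the listing: while a transaction is active, changes are applied locally and the corresponding commands are accumulated; rollback() then replays them in reverse and unlocks the persisted nodes, as shown above. "transaction" stands for a transaction object exposing rollback(), and "schema" for a facade like the one in Example #4.

     // Hypothetical caller: transaction, schema, parent and child are assumed to exist.
     try {
         schema.AddArcToNode(parent, child, "child-of");   // applied locally, command accumulated
     } catch (AbstractPlanckDBException) {
         transaction = transaction.rollback();             // revert the accumulated commands and release locks
         throw;
     }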
Example #16
 /**
  * Create instance of Arc
  * @param parent
  * @param child
  * @param name
  */
 internal Arc(Node parent, Node child, byte[] name)
 {
     this.parent = parent;
     this.child = child;
     this.name = name;
 }
Example #17
 /**
  * deleteNode method
  * Like deleteNode in the non-transactional schemaAdapter,
  * but unlike that adapter, this one commits its changes directly on the entities.
  * It also accumulates the command so that the changes can be rolled back on the client or submitted to the server.
  * @param node
  * @throws PlanckDBException
  * @throws TransactionException
  */
 public void DeleteNode(Node node)
 {
     validate(node);
     try{
         reentrantLock.enter();
         if (transaction!=null){
             transaction.deleteNode(node);
         }else{
             schemaAdapter.DeleteNode(node);
         }
     }finally {
         reentrantLock.exit();
     }
 }
Example #18
 /**
  * Create new node.
  * @param entityId
  * @param lockEntity
  * @param sessionId
  * @param ownerId
  * @param attributes
  * @return the newly created node
  */
 public Node createNewNode(int entityId, bool lockEntity, int sessionId, int ownerId, NodeAttribute[] attributes)
 {
     try{
         reentrantLock.enter();
         Node node=new Node(entityId, lockEntity, sessionId, ownerId, attributes);
         idMap[entityId]=node;
         return node;
     }finally {
         reentrantLock.exit();
     }
 }
Example #19
 public void RemoveArcFromNode(Node parent, String arcName)
 {
     validate(parent);
     Encoding encoding = DBProperties.getEncoding();
     byte[] array=encoding.GetBytes(arcName);
     RemoveArcFromNode(parent,array);
 }
Example #20
 /**
  * deleteNode method;
  * Delete node
  * Deleting a node with this method ensures that the server has accepted
  * the node deletion, and it also ensures that all clients will
  * receive the change in the same order in which the server accepted it.
  * However, the time at which remote clients apply the change cannot
  * be predicted because of network limits.
  * @param node
  * @throws PlanckDBException
  * @throws TransactionException
  */
 public void DeleteNode(Node node)
 {
     List<NodeAttribute> values = node.getAttributes().values();
     NodeAttribute[] oldAttributes =values.ToArray();
     Command command = commandBuilder.buildDeleteNode(node.getId(), node.getLock(), sessionMetaData.getSchemaId(), core.getCoreManager().getKey(), sessionMetaData.GetSessionId(),  lockTimeout,oldAttributes);
     commandExecutor.produce(command);
 }
Example #21
        /**
         * lockNodes method
         * Locks nodes
         * This method prevents the nodes in "nodesToLock" from being changed
         * by clients that do not own the lock.
         * Any client that tries to change a locked node will get a
         * "PlanckDBException" with status "non lock owner".
         * Note that a successful call to lockNodes means that
         * the nodes are locked on the server side.
         * @param lockEntity
         * @param forceLock
         * @param nodesToLock
         * @throws SerializationException
         * @throws PlanckDBException
         * @throws TransactionException
         */
        public void LockNodes(bool lockEntity, bool forceLock, Node[] nodesToLock)
        {
            List<Command> list=new List<Command>();
            foreach (Node node in nodesToLock) {
                 // Can't lock null (in case somebody accidentally passes null in the method parameters)
                if(node==null){
                    continue;
                }

                 // This entity was created inside the transaction, so it can't be locked (the node has not been committed to the server yet)
                if(node.getId()<PlanckDBConstants.MIN_ENTITY_ID){
                    continue;
                }

                 // Don't send a lock request if the node is already in the requested lock state and owned by this session.
                if(node.getLock()==lockEntity&&node.getOwnerId()==sessionMetaData.GetSessionId()){
                    continue;
                }
                Command internalCommand=commandBuilder.createLockCommand(node.getId(),core.getCoreManager().getKey(),core.getSessionMetaData().getSchemaId(),core.getSessionMetaData().GetSessionId(),lockEntity,forceLock,false,lockTimeout);
                list.Add(internalCommand);
            }
            if(list.Count>0){
                Command command = commandBuilder.buildAtomicModelCommands(list, core.getSessionMetaData().getSchemaId(), core.getCoreManager().getKey(),core.getSessionMetaData().GetSessionId(),lockTimeout);
                commandExecutor.produce(command);
            }
        }
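A hypothetical before/after sketch with illustrative names: lock a group of nodes, change them, and then release the locks with the same method, which is also how rollback() in Example #15 unlocks nodes via LockNodes(false, false, ...):

     // Hypothetical caller: schemaAdapter, parent and child are assumed to exist.
     Node[] nodes = { parent, child };
     schemaAdapter.LockNodes(true, false, nodes);      // request the locks without forcing
     try {
         // ... change the locked nodes here ...
     } finally {
         schemaAdapter.LockNodes(false, false, nodes); // release the locks
     }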
Example #22
 /**
  * open method
  * Opens a transaction
  * In order to use existing nodes inside a transaction, the nodes must be locked and owned by the transaction
  * Using a non-locked node will throw a TransactionException
  * New nodes created inside the transaction are locked in the local schema automatically
  * Changes in the transaction are absorbed in the local schema only.
  * In order to distribute the changes to the server and the other clients, the transaction must be committed
  * @param nodesToLock
  * @throws TransactionException
  * @throws PlanckDBException
  * @throws EntityLockException
  */
 public void Open(Node[] nodesToLock)
 {
     throw new PlanckDBException(CommandStatus.unsupported,"Shouldn't reach this point, SchemaAdapter doesn't support transactions");
 }
Example #23
 /**
  * addChild<p>
  * The method creates a new arc whose unique name is the given arcName<p>
  * The method is not accessible to the storage user;<p>
  * if the storage user wants to add an arc to a node, the change must be made through the schema;<p>
  * @param child
  * @param arcName
  * @return the newly created arc
  */
 internal Arc addChild(Node child,byte[] arcName)
 {
     Arc arc=new Arc(this,child,arcName);
     this.children.put(arcName,arc);
     child.parents.Add(arc);
     return arc;
 }
Example #24
 public void removeChildFromParent(Node parent, String arcName)
 {
     Encoding encoding = DBProperties.getEncoding();
     byte[] array=encoding.GetBytes(arcName);
     removeChildFromParent(parent,array);
 }