Example #1
        public Brain(MyCaffeControl<T> mycaffe, PropertySet properties, CryptoRandom random, Phase phase)
        {
            m_mycaffe    = mycaffe;
            m_net        = mycaffe.GetInternalNet(phase);
            m_solver     = mycaffe.GetInternalSolver();
            m_properties = properties;
            m_random     = random;

            // Locate the required layers within the internal net.
            m_memData = m_net.FindLayer(LayerParameter.LayerType.MEMORYDATA, null) as MemoryDataLayer<T>;
            m_memLoss = m_net.FindLayer(LayerParameter.LayerType.MEMORY_LOSS, null) as MemoryLossLayer<T>;
            SoftmaxLayer<T> softmax = m_net.FindLayer(LayerParameter.LayerType.SOFTMAX, null) as SoftmaxLayer<T>;

            if (softmax != null)
            {
                throw new Exception("The PG.SIMPLE trainer does not support the Softmax layer, use the 'PG.ST' or 'PG.MT' trainer instead.");
            }

            if (m_memData == null)
            {
                throw new Exception("Could not find the MemoryData Layer!");
            }

            if (m_memLoss == null)
            {
                throw new Exception("Could not find the MemoryLoss Layer!");
            }

            m_memLoss.OnGetLoss += memLoss_OnGetLoss;

            // Blobs used to hold the discounted rewards and the policy gradients.
            m_blobDiscountedR    = new Blob<T>(mycaffe.Cuda, mycaffe.Log);
            m_blobPolicyGradient = new Blob<T>(mycaffe.Cuda, mycaffe.Log);

            m_nMiniBatch = mycaffe.CurrentProject.GetBatchSize(phase);
        }
Example #2
    public void Start()
    {
        var datalayer = new MemoryDataLayer();

        // Initialize the Game Foundation core systems.
        GameFoundation.Initialize(datalayer, OnloadFinish, OnInitFailed);
    }
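
For reference, a minimal sketch of the two callbacks referenced above. OnloadFinish and OnInitFailed are not shown in the source; the signatures below are assumptions based on the Initialize overload used in Example #5 (a success callback and a failure callback that receives an Exception).

    // Hypothetical implementations of the callbacks referenced above; the
    // signatures are assumed from the Initialize overload used in Example #5.
    private void OnloadFinish()
    {
        Debug.Log("Game Foundation initialized successfully.");
    }

    private void OnInitFailed(Exception e)
    {
        Debug.LogError($"Game Foundation initialization failed: {e.Message}");
    }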
Example #3
        public Brain(MyCaffeControl<T> mycaffe, PropertySet properties, CryptoRandom random, Phase phase)
        {
            m_mycaffe    = mycaffe;
            m_net        = mycaffe.GetInternalNet(phase);
            m_solver     = mycaffe.GetInternalSolver();
            m_properties = properties;
            m_random     = random;

            // Locate the required layers within the internal net.
            m_memData = m_net.FindLayer(LayerParameter.LayerType.MEMORYDATA, null) as MemoryDataLayer<T>;
            m_memLoss = m_net.FindLayer(LayerParameter.LayerType.MEMORY_LOSS, null) as MemoryLossLayer<T>;
            m_softmax = m_net.FindLayer(LayerParameter.LayerType.SOFTMAX, null) as SoftmaxLayer<T>;

            if (m_memData == null)
            {
                throw new Exception("Could not find the MemoryData Layer!");
            }

            if (m_memLoss == null)
            {
                throw new Exception("Could not find the MemoryLoss Layer!");
            }

            m_memData.OnDataPack += memData_OnDataPack;
            m_memLoss.OnGetLoss  += memLoss_OnGetLoss;

            // Working blobs used during training (discounted rewards, policy
            // gradients, one-hot actions, loss and action probability logits).
            m_blobDiscountedR     = new Blob<T>(mycaffe.Cuda, mycaffe.Log);
            m_blobPolicyGradient  = new Blob<T>(mycaffe.Cuda, mycaffe.Log);
            m_blobActionOneHot    = new Blob<T>(mycaffe.Cuda, mycaffe.Log);
            m_blobDiscountedR1    = new Blob<T>(mycaffe.Cuda, mycaffe.Log);
            m_blobPolicyGradient1 = new Blob<T>(mycaffe.Cuda, mycaffe.Log);
            m_blobActionOneHot1   = new Blob<T>(mycaffe.Cuda, mycaffe.Log);
            m_blobLoss            = new Blob<T>(mycaffe.Cuda, mycaffe.Log);
            m_blobAprobLogit      = new Blob<T>(mycaffe.Cuda, mycaffe.Log);

            // When a Softmax layer is present, create a SoftmaxCrossEntropyLoss
            // layer used to compute the loss and its gradients.
            if (m_softmax != null)
            {
                LayerParameter p = new LayerParameter(LayerParameter.LayerType.SOFTMAXCROSSENTROPY_LOSS);
                p.loss_weight.Add(1);
                p.loss_weight.Add(0);
                p.loss_param.normalization = LossParameter.NormalizationMode.NONE;
                m_softmaxCe = new SoftmaxCrossEntropyLossLayer<T>(mycaffe.Cuda, mycaffe.Log, p);
            }

            // Clone the learnable parameters to accumulate gradients across the mini-batch.
            m_colAccumulatedGradients = m_net.learnable_parameters.Clone();
            m_colAccumulatedGradients.SetDiff(0);

            // Use the project's batch size when specified, optionally overridden
            // by the 'MiniBatch' property.
            int nMiniBatch = mycaffe.CurrentProject.GetBatchSize(phase);

            if (nMiniBatch != 0)
            {
                m_nMiniBatch = nMiniBatch;
            }

            m_nMiniBatch = m_properties.GetPropertyAsInt("MiniBatch", m_nMiniBatch);
        }
Example #4
        static MemoryDataLayer<float> createMemoryDataLayer(CudaDnn<float> cuda, Log log)
        {
            // Setup the MemoryDataLayer parameters.
            LayerParameter p = new LayerParameter(LayerParameter.LayerType.MEMORYDATA);

            p.memory_data_param.label_type = LayerParameterBase.LABEL_TYPE.SINGLE;
            p.memory_data_param.batch_size = 1;
            p.memory_data_param.channels   = 1;
            p.memory_data_param.height     = 1;
            p.memory_data_param.width      = 60;

            // Create the MemoryDataLayer using the layer factory.
            MemoryDataLayer<float> layer = Layer<float>.Create(cuda, log, p, null) as MemoryDataLayer<float>;

            return layer;
        }
Example #5
        private void GameFoundationInitialization()
        {
            _memoryDataLayer = new MemoryDataLayer();

            if (!GameFoundation.IsInitialized)
            {
                GameFoundation.Initialize(_memoryDataLayer,
                    () => { Debug.Log("Initializing GameFoundation Success EventBinding"); },
                    e => {
                        Debug.LogError(
                            $"Error in EventBinding GameFoundation Initialization \n {e.Source} \t {e.Message}");
                    });
            }

            _mainWeapon = InventoryManager.CreateItem(equipement.mainWeaponRef);
        }
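
Note that InventoryManager.CreateItem is invoked immediately after GameFoundation.Initialize; this works only if the success callback fires synchronously. If initialization completes asynchronously, the CreateItem call belongs inside the success callback.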
Example #6
        static void Main(string[] args)
        {
            // Create the output log used.
            Log log = new Log("Test");
            // Create the CudaDnn connection used.  NOTE: only one CudaDnn connection is needed
            // per thread, for each instance creates and manages its own low-level kernel state,
            // which includes all memory allocated, etc.  All memory handles allocated should
            // be used with the CudaDnn instance that allocated them.
            CudaDnn<float>         cuda      = new CudaDnn<float>(0, DEVINIT.CUBLAS | DEVINIT.CURAND);
            MemoryDataLayer<float> layer     = createMemoryDataLayer(cuda, log);
            List<Datum>            rgData    = dataSetter();
            Blob<float>            blobData  = new Blob<float>(cuda, log);
            Blob<float>            blobLabel = new Blob<float>(cuda, log);
            BlobCollection<float>  colBottom = new BlobCollection<float>();
            BlobCollection<float>  colTop    = new BlobCollection<float>();

            // Set the top blobs; MemoryDataLayers only have tops (i.e., no bottoms).
            colTop.Add(blobData);
            colTop.Add(blobLabel);

            layer.Setup(colBottom, colTop);
            layer.AddDatumVector(rgData);


            // Run Pass 1 - the memory data layer advances its internal index by the batch size after the forward pass completes.
            layer.Forward(colBottom, colTop);

            float[] rgDataPass1  = colTop[0].mutable_cpu_data;
            float[] rgLabelPass1 = colTop[1].mutable_cpu_data;

            log.CHECK_EQ(rgDataPass1.Length, 60, "There should be 60 data items.");
            for (int i = 0; i < rgDataPass1.Length; i++)
            {
                log.CHECK_EQ(rgDataPass1[i], 10, "The data value should = 10.");
            }

            log.CHECK_EQ(rgLabelPass1.Length, 1, "There should only be one label, for the batch size = 1.");
            log.CHECK_EQ(rgLabelPass1[0], 0, "The label of the first item should = 0.");
            Console.WriteLine("First Pass - label = " + rgLabelPass1[0].ToString());


            // Pass 2 - the memory data layer advances its internal index by the batch size after the forward pass completes.
            layer.Forward(colBottom, colTop);

            float[] rgDataPass2  = colTop[0].mutable_cpu_data;
            float[] rgLabelPass2 = colTop[1].mutable_cpu_data;

            log.CHECK_EQ(rgDataPass2.Length, 60, "There should be 60 data items.");
            for (int i = 0; i < rgDataPass2.Length; i++)
            {
                log.CHECK_EQ(rgDataPass2[i], 10, "The data value should = 10.");
            }

            log.CHECK_EQ(rgLabelPass2.Length, 1, "There should only be one label, for the batch size = 1.");
            log.CHECK_EQ(rgLabelPass2[0], 1, "The label of the first item should = 1.");
            Console.WriteLine("Second Pass - label = " + rgLabelPass2[0].ToString());

            // Pass 3 - the memory data layer advances its internal index by the batch size after the forward pass completes.
            layer.Forward(colBottom, colTop);

            float[] rgDataPass3  = colTop[0].mutable_cpu_data;
            float[] rgLabelPass3 = colTop[1].mutable_cpu_data;

            log.CHECK_EQ(rgDataPass3.Length, 60, "There should be 60 data items.");
            for (int i = 0; i < rgDataPass3.Length; i++)
            {
                log.CHECK_EQ(rgDataPass3[i], 10, "The data value should = 10.");
            }

            log.CHECK_EQ(rgLabelPass3.Length, 1, "There should only be one label, for the batch size = 1.");
            log.CHECK_EQ(rgLabelPass3[0], 2, "The label of the first item should = 2.");
            Console.WriteLine("Third Pass - label = " + rgLabelPass3[0].ToString());

            layer.Dispose();
            blobData.Dispose();
            blobLabel.Dispose();
            cuda.Dispose();

            Console.WriteLine("Press any key...");
            Console.ReadKey();
        }
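
Example #6 calls a dataSetter() helper that is not shown above. A minimal sketch consistent with the checks in Main (three items of 60 values each, all equal to 10, labeled 0, 1, and 2, matching the 1 x 1 x 60 shape configured in Example #4) might look like the following; the exact Datum constructor signature is an assumption:

        static List<Datum> dataSetter()
        {
            List<Datum> rgData = new List<Datum>();

            // Create three items of 60 values each (all = 10), labeled 0, 1 and 2,
            // to match the shape set on the MemoryDataLayer (1 x 1 x 60, batch size 1).
            for (int i = 0; i < 3; i++)
            {
                List<double> rgVals = new List<double>();

                for (int j = 0; j < 60; j++)
                {
                    rgVals.Add(10);
                }

                // NOTE: this Datum constructor signature is an assumption.
                rgData.Add(new Datum(true, 1, 60, 1, i, DateTime.MinValue, rgVals, 0, false, i));
            }

            return rgData;
        }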