/// <summary>
/// Checks whether a clustering run is currently in progress.
/// </summary>
/// <returns>
/// 1 if the most recent cluster schedule row exists and has no StopTime
/// (i.e. a run started but never finished); otherwise 0.
/// </returns>
public int checkProcess()
{
    int processType = 0;
    Clustering_Users_DAO dao = null;
    try
    {
        dao = new Clustering_Users_DAO();
        dao.beginTransaction();
        ClusterSchedule cs = dao.getLastClusterSchedule();
        // A null StopTime means the last run was started but not yet stamped
        // finished by updateClusterSchedule.
        if (cs != null && cs.StopTime == null)
        {
            processType = 1;
        }
        dao.commitTransaction();
        return processType;
    }
    catch (Exception)
    {
        // dao is still null if the DAO constructor itself threw; guard so the
        // rollback does not mask the real error with a NullReferenceException.
        if (dao != null)
        {
            dao.rollbackTransaction();
        }
        // "throw;" preserves the original stack trace ("throw ex;" resets it).
        throw;
    }
}
/// <summary>
/// Stamps the most recently started schedule row of the given cluster type as
/// finished: sets StopTime to the database's current time and stores the log text.
/// </summary>
/// <param name="clusterSchedule">Supplies the ClusterType filter and the Log text to store.</param>
public void updateClusterSchedule(ClusterSchedule clusterSchedule)
{
    string strQuery = "UPDATE [RS].[CLUSTER_SCHEDULE_TBL] SET [StopTime] = GETDATE(),[Log] = @Log WHERE [ClusterType] = @ClusterType and [ScheduleID] in (SELECT TOP 1 [ScheduleID] FROM [RS].[CLUSTER_SCHEDULE_TBL] ORDER BY [StartTime] DESC)";
    var parameters = new Dictionary<string, object>
    {
        { "@Log", clusterSchedule.Log },
        { "@ClusterType", clusterSchedule.ClusterType }
    };
    executeNonQuery(strQuery, parameters);
}
/// <summary>
/// Inserts a new schedule row marking the start of a clustering run; StartTime
/// is taken from the database clock (GETDATE()), StopTime is left unset.
/// </summary>
/// <param name="clusterSchedule">Carries ClusterType, Algorithm, Log and LoginID for the new row.</param>
public void addNewClusterSchedule(ClusterSchedule clusterSchedule)
{
    string strQuery = "INSERT INTO [RS].[CLUSTER_SCHEDULE_TBL] ([ClusterType] ,[StartTime] ,[Log] ,[Algorithm],[LoginID]) VALUES (@ClusterType , GETDATE() , @Log , @Algorithm, @LoginID) ";
    var parameters = new Dictionary<string, object>
    {
        { "@ClusterType", clusterSchedule.ClusterType },
        { "@Algorithm", clusterSchedule.Algorithm },
        { "@Log", clusterSchedule.Log },
        { "@LoginID", clusterSchedule.LoginID }
    };
    executeNonQuery(strQuery, parameters);
}
/// <summary>
/// Returns the most recently started schedule row, or null when the table is empty.
/// </summary>
/// <returns>The latest ClusterSchedule (by StartTime), or null if none exists.</returns>
public ClusterSchedule getLastClusterSchedule()
{
    string strSelect = "SELECT TOP 1 [ScheduleID] ,[ClusterType] ,[StartTime] ,[StopTime] ,[Log] , [Algorithm], [LoginID] FROM [RS].[CLUSTER_SCHEDULE_TBL] ORDER BY [StartTime] DESC";
    SqlDataReader dr = executeReader(strSelect);
    ClusterSchedule clusterSchedule = null;
    while (dr.Read())
    {
        clusterSchedule = new ClusterSchedule();
        clusterSchedule.ScheduleID = dr.GetInt32(dr.GetOrdinal("ScheduleID"));
        clusterSchedule.ClusterType = dr.GetString(dr.GetOrdinal("ClusterType"));
        clusterSchedule.StartTime = dr.GetDateTime(dr.GetOrdinal("StartTime"));
        // FIX: StopTime is NULL while a run is in progress (addNewClusterSchedule
        // never sets it; updateClusterSchedule stamps it on completion), and
        // GetDateTime throws on DBNull. Guard like getListClusterSchedule_DESC does,
        // so checkProcess can actually observe the in-progress row.
        clusterSchedule.StopTime = (dr["StopTime"] == System.DBNull.Value) ? (DateTime?)null : dr.GetDateTime(dr.GetOrdinal("StopTime"));
        clusterSchedule.Log = dr.GetString(dr.GetOrdinal("Log"));
        clusterSchedule.Algorithm = dr.GetString(dr.GetOrdinal("Algorithm"));
        clusterSchedule.LoginID = dr.GetString(dr.GetOrdinal("LoginID"));
    }
    dr.Close();
    return clusterSchedule;
}
/// <summary>
/// Loads every schedule row from CLUSTER_SCHEDULE_TBL into a list.
/// NOTE(review): the method name says _DESC but the SQL orders by StartTime ASC —
/// preserved as-is because callers may depend on this order; confirm which is intended.
/// </summary>
/// <returns>All schedule rows, ordered by StartTime ascending; empty list when the table is empty.</returns>
public List<ClusterSchedule> getListClusterSchedule_DESC()
{
    string strSelect = "SELECT [ScheduleID] ,[ClusterType] ,[StartTime] ,[StopTime] ,[Log] ,[Algorithm] ,[LoginID] FROM [RS].[CLUSTER_SCHEDULE_TBL] order by [StartTime] ASC";
    SqlDataReader dr = executeReader(strSelect);
    var result = new List<ClusterSchedule>();
    while (dr.Read())
    {
        var row = new ClusterSchedule();
        row.ScheduleID = dr.GetInt32(dr.GetOrdinal("ScheduleID"));
        row.ClusterType = dr.GetString(dr.GetOrdinal("ClusterType"));
        row.StartTime = dr.GetDateTime(dr.GetOrdinal("StartTime"));
        // StopTime is NULL for a run that has not finished yet; map DBNull to null.
        if (dr["StopTime"] == System.DBNull.Value)
        {
            row.StopTime = (DateTime?)null;
        }
        else
        {
            row.StopTime = dr.GetDateTime(dr.GetOrdinal("StopTime"));
        }
        row.Log = dr.GetString(dr.GetOrdinal("Log"));
        row.Algorithm = dr.GetString(dr.GetOrdinal("Algorithm"));
        row.LoginID = dr.GetString(dr.GetOrdinal("LoginID"));
        result.Add(row);
    }
    dr.Close();
    return result;
}
/// <summary>
/// Runs the full user-clustering update inside one DAO transaction: cleans the
/// rating matrix, records a schedule row, clusters each sufficiently large
/// sub-category on its own, pools the small sub-categories per category into
/// "_MERGE_" groups and clusters those, then removes excess clusters and stamps
/// the schedule row with a summary log.
/// </summary>
/// <param name="LoginID">Login of the user who triggered the run; stored on the schedule row.</param>
/// <returns>
/// The checkProcess() result: 0 when the update ran, 1 when another run was
/// already in progress (in which case nothing is done).
/// NOTE(review): checkProcess and the schedule insert are separate transactions,
/// so two callers racing here could both see 0 — confirm an upstream guard exists.
/// </returns>
public int updateCluster_AllTransac(string LoginID)
{
    Clustering_Users_DAO dao = null;
    ClusterSchedule clusterSchedule = new ClusterSchedule();
    clusterSchedule.ClusterType = ConstantValues.SCHE_CLUSTER_USER;
    clusterSchedule.Log = String.Empty;
    clusterSchedule.LoginID = LoginID;
    int processType = checkProcess();
    if (processType == 0)
    {
        Settings st = getSettings();
        try
        {
            dao = new Clustering_Users_DAO();
            dao.beginTransaction();
            dao.CLEAN_RATTING_MATRIX();

            // Add to Schedule
            clusterSchedule.Algorithm = Cluster.CLUSTER_KMEAN; //st.cluster_type;
            dao.addNewClusterSchedule(clusterSchedule);

            string log = "Update Cluster\n";
            List<User_SubCategories> list_User_SubCategories = dao.getAllSubCategories();
            bool existTransac = false;
            List<string> list_User_Categories = get_User_Categories(list_User_SubCategories);
            // One bucket per category, collecting sub-categories too small to cluster alone.
            List<List<User_SubCategories>> list_SMALL_User_SubCategories = new List<List<User_SubCategories>>();
            foreach (var item in list_User_Categories)
            {
                list_SMALL_User_SubCategories.Add(new List<User_SubCategories>());
            }

            #region Cluster For LargeGroup
            int numUser = 0;
            Dictionary<string, int> allItem = new Dictionary<string, int>();
            foreach (var item in list_User_SubCategories)
            {
                string categoryName = item.U_SubCategoryID + "_";
                // Convert Data to matrix
                Dictionary<string, int> dic_users = new Dictionary<string, int>();
                Dictionary<string, int> dic_items = new Dictionary<string, int>();
                // Compute ratting matrix
                List<MatrixItem> lstRMI = dao.computeRattingMatrixItem(item.U_SubCategoryID, st.Alpha);
                double[][] x = Util.toMatrix_MatrixItem(lstRMI, out dic_users, out dic_items);
                // Only cluster a sub-category on its own when it has enough users;
                // otherwise defer it to the merged small-group pass below.
                if (dic_users.Count > st.U_M / 4)
                {
                    numUser += dic_users.Count;
                    foreach (var it in dic_items)
                    {
                        if (!allItem.ContainsKey(it.Key))
                        {
                            allItem.Add(it.Key, it.Value);
                        }
                    }
                    List<Partion> listPartion = getPartion_ByU_SubCategoryID(dao, item.U_SubCategoryID);
                    LoadUpdateCluster(dao, st, ref existTransac, listPartion, item.U_SubCategoryID, categoryName, dic_users, dic_items, x);
                }
                else
                {
                    add_SMALL_User_SubCategories(list_User_Categories, ref list_SMALL_User_SubCategories, item);
                }
            }
            #endregion

            #region Cluster For SmallGroup
            // Clear previous merged-group results before rebuilding them.
            dao.delete_USER_CLUSTER_TBL_2("_MERGE_");
            dao.delete_PARTION_TBL_2("_MERGE_");
            dao.delete_USER_CENTROID_TBL_2("_MERGE_");
            for (int i = 0; i < list_User_Categories.Count; i++)
            {
                string categoryName = list_User_Categories[i] + "_MERGE_";
                // Convert Data to matrix
                Dictionary<string, int> dic_users = new Dictionary<string, int>();
                Dictionary<string, int> dic_items = new Dictionary<string, int>();
                // Compute ratting matrix across all small sub-categories of this category
                List<MatrixItem> lstRMI = new List<MatrixItem>();
                foreach (User_SubCategories item in list_SMALL_User_SubCategories[i])
                {
                    lstRMI.AddRange(dao.computeRattingMatrixItem(item.U_SubCategoryID, st.Alpha));
                }
                double[][] x = Util.toMatrix_MatrixItem(lstRMI, out dic_users, out dic_items);
                numUser += dic_users.Count;
                foreach (var it in dic_items)
                {
                    if (!allItem.ContainsKey(it.Key))
                    {
                        allItem.Add(it.Key, it.Value);
                    }
                }
                List<Partion> listPartion = getPartion_ByU_CategoryID_ForMergeGreoup(dao, categoryName);
                LoadUpdateCluster(dao, st, ref existTransac, listPartion, list_User_Categories[i], categoryName, dic_users, dic_items, x);
            }
            #endregion

            // Remove Excess USER_CLUSTER_TBL
            dao.removeExcessCluster();
            // Update USE_TBL
            if (existTransac)
            {
                updateTransac_CheckPoint();
            }
            log += "\t Number of Client Categories: " + list_User_SubCategories.Count;
            log += "\t Number of Client : " + numUser;
            log += "\t Number of Item : " + allItem.Count;
            log += "\n Parameter: \n";
            log += "\t Algorithm:" + st.cluster_type;
            log += "\t Epsilon :" + st.epsilon;
            log += "\t Max Loop :" + st.maxLoop;
            // Update Success Schedule
            clusterSchedule.Log = log;
            dao.updateClusterSchedule(clusterSchedule);
            dao.commitTransaction();
        }
        catch (Exception)
        {
            // dao is null if the DAO constructor threw before the transaction began;
            // guard so rollback does not replace the real error with a NullReferenceException.
            if (dao != null)
            {
                dao.rollbackTransaction();
            }
            // "throw;" preserves the original stack trace ("throw ex;" resets it).
            throw;
        }
    }
    return processType;
}