[PATCH 17/73] staging/lustre: use 64-bit time for pl_recalc

green at linuxhacker.ru
Sun Sep 27 20:45:17 UTC 2015


From: Arnd Bergmann <arnd at arndb.de>

The ldlm pool calculates elapsed time by comparing the previous and
current get_seconds() values, which is unsafe on 32-bit machines
after 2038.
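
(For illustration only, not part of the patch: a minimal userspace sketch
of the overflow. On 32-bit machines time_t is a signed 32-bit long, so the
seconds counter runs out of range on 2038-01-19 and both the stored
timestamp and intervals computed across the wrap go negative.)

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Last second representable in a signed 32-bit time_t:
	 * 2038-01-19T03:14:07Z. */
	int64_t limit = INT32_MAX;

	/* One second later, truncated back to 32 bits, wraps negative
	 * (reads as a date in December 1901 on two's-complement targets). */
	int32_t wrapped = (int32_t)(limit + 1);

	/* An elapsed-time check such as the driver's
	 * "recalc_interval_sec < pl_recalc_period" then misfires. */
	int64_t bad_interval = (int64_t)wrapped - limit;

	/* The same arithmetic in 64 bits stays correct. */
	int64_t good_interval = (limit + 1) - limit;

	printf("wrapped timestamp: %" PRId32 "\n", wrapped);
	printf("32-bit interval:   %" PRId64 "\n", bad_interval);
	printf("64-bit interval:   %" PRId64 "\n", good_interval);
	return 0;
}

time64_t stays 64 bits wide on 32-bit machines as well, so the wrap moves
far beyond any realistic date.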

This changes the code to use time64_t and ktime_get_real_seconds().
The 'real' (wall-clock) time is kept rather than the 'monotonic' time
because the timestamps show up in debug prints.
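
(Again for illustration, a hypothetical stand-alone module sketch -- the
clock_demo names are made up, not part of the patch -- contrasting the two
accessors. ktime_get_real_seconds() follows the wall clock, so its values
are meaningful in debug prints but can jump when the clock is set;
ktime_get_seconds() is monotonic but counts from boot. Whichever is
chosen, the same accessor has to both initialize and compare
pl_recalc_time, since the two clocks have different bases.)

#include <linux/init.h>
#include <linux/module.h>
#include <linux/timekeeping.h>

static int __init clock_demo_init(void)
{
	time64_t real = ktime_get_real_seconds();	/* seconds since 1970 */
	time64_t mono = ktime_get_seconds();		/* seconds since boot */

	/* "real" is readable in logs; "mono" never jumps backwards.
	 * Subtracting one from the other yields nonsense, which is why
	 * a field like pl_recalc_time must stick to a single clock. */
	pr_info("real=%lld mono=%lld diff=%lld\n",
		real, mono, real - mono);
	return 0;
}

static void __exit clock_demo_exit(void)
{
}

module_init(clock_demo_init);
module_exit(clock_demo_exit);
MODULE_LICENSE("GPL");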

Signed-off-by: Arnd Bergmann <arnd at arndb.de>
Signed-off-by: Oleg Drokin <green at linuxhacker.ru>
---
 drivers/staging/lustre/lustre/include/lustre_dlm.h |  4 +--
 drivers/staging/lustre/lustre/ldlm/ldlm_pool.c     | 30 +++++++++++-----------
 2 files changed, 17 insertions(+), 17 deletions(-)

diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
index 796a997..1ac08e1 100644
--- a/drivers/staging/lustre/lustre/include/lustre_dlm.h
+++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h
@@ -256,9 +256,9 @@ struct ldlm_pool {
 	 *  server_slv * lock_volume_factor. */
 	atomic_t		pl_lock_volume_factor;
 	/** Time when last SLV from server was obtained. */
-	time_t			pl_recalc_time;
+	time64_t		pl_recalc_time;
 	/** Recalculation period for pool. */
-	time_t			pl_recalc_period;
+	time64_t		pl_recalc_period;
 	/** Recalculation and shrink operations. */
 	const struct ldlm_pool_ops	*pl_ops;
 	/** Number of planned locks for next period. */
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
index c234acb..1c9d67f 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
@@ -330,14 +330,14 @@ static void ldlm_srv_pool_push_slv(struct ldlm_pool *pl)
  */
 static int ldlm_srv_pool_recalc(struct ldlm_pool *pl)
 {
-	time_t recalc_interval_sec;
+	time64_t recalc_interval_sec;
 
-	recalc_interval_sec = get_seconds() - pl->pl_recalc_time;
+	recalc_interval_sec = ktime_get_real_seconds() - pl->pl_recalc_time;
 	if (recalc_interval_sec < pl->pl_recalc_period)
 		return 0;
 
 	spin_lock(&pl->pl_lock);
-	recalc_interval_sec = get_seconds() - pl->pl_recalc_time;
+	recalc_interval_sec = ktime_get_real_seconds() - pl->pl_recalc_time;
 	if (recalc_interval_sec < pl->pl_recalc_period) {
 		spin_unlock(&pl->pl_lock);
 		return 0;
@@ -358,7 +358,7 @@ static int ldlm_srv_pool_recalc(struct ldlm_pool *pl)
 	 */
 	ldlm_pool_recalc_grant_plan(pl);
 
-	pl->pl_recalc_time = get_seconds();
+	pl->pl_recalc_time = ktime_get_real_seconds();
 	lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
 			    recalc_interval_sec);
 	spin_unlock(&pl->pl_lock);
@@ -467,10 +467,10 @@ static void ldlm_cli_pool_pop_slv(struct ldlm_pool *pl)
  */
 static int ldlm_cli_pool_recalc(struct ldlm_pool *pl)
 {
-	time_t recalc_interval_sec;
+	time64_t recalc_interval_sec;
 	int ret;
 
-	recalc_interval_sec = get_seconds() - pl->pl_recalc_time;
+	recalc_interval_sec = ktime_get_real_seconds() - pl->pl_recalc_time;
 	if (recalc_interval_sec < pl->pl_recalc_period)
 		return 0;
 
@@ -478,7 +478,7 @@ static int ldlm_cli_pool_recalc(struct ldlm_pool *pl)
 	/*
 	 * Check if we need to recalc lists now.
 	 */
-	recalc_interval_sec = get_seconds() - pl->pl_recalc_time;
+	recalc_interval_sec = ktime_get_real_seconds() - pl->pl_recalc_time;
 	if (recalc_interval_sec < pl->pl_recalc_period) {
 		spin_unlock(&pl->pl_lock);
 		return 0;
@@ -513,7 +513,7 @@ out:
 	 * Time of LRU resizing might be longer than period,
 	 * so update after LRU resizing rather than before it.
 	 */
-	pl->pl_recalc_time = get_seconds();
+	pl->pl_recalc_time = ktime_get_real_seconds();
 	lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
 			    recalc_interval_sec);
 	spin_unlock(&pl->pl_lock);
@@ -571,10 +571,10 @@ static const struct ldlm_pool_ops ldlm_cli_pool_ops = {
  */
 int ldlm_pool_recalc(struct ldlm_pool *pl)
 {
-	time_t recalc_interval_sec;
+	time64_t recalc_interval_sec;
 	int count;
 
-	recalc_interval_sec = get_seconds() - pl->pl_recalc_time;
+	recalc_interval_sec = ktime_get_real_seconds() - pl->pl_recalc_time;
 	if (recalc_interval_sec <= 0)
 		goto recalc;
 
@@ -599,14 +599,14 @@ int ldlm_pool_recalc(struct ldlm_pool *pl)
 		lprocfs_counter_add(pl->pl_stats, LDLM_POOL_RECALC_STAT,
 				    count);
 	}
-	recalc_interval_sec = pl->pl_recalc_time - get_seconds() +
+	recalc_interval_sec = pl->pl_recalc_time - ktime_get_real_seconds() +
 			      pl->pl_recalc_period;
 	if (recalc_interval_sec <= 0) {
 		/* Prevent too frequent recalculation. */
-		CDEBUG(D_DLMTRACE, "Negative interval(%ld), "
-		       "too short period(%ld)",
+		CDEBUG(D_DLMTRACE,
+	       "Negative interval(%lld), too short period(%lld)",
 		       recalc_interval_sec,
-		       pl->pl_recalc_period);
+		       (s64)pl->pl_recalc_period);
 		recalc_interval_sec = 1;
 	}
 
@@ -893,7 +893,7 @@ int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
 
 	spin_lock_init(&pl->pl_lock);
 	atomic_set(&pl->pl_granted, 0);
-	pl->pl_recalc_time = get_seconds();
+	pl->pl_recalc_time = ktime_get_real_seconds();
 	atomic_set(&pl->pl_lock_volume_factor, 1);
 
 	atomic_set(&pl->pl_grant_rate, 0);
-- 
2.1.0


