/*
 * IPVS:	Locality-Based Least-Connection with Replication scheduler
 *
 * Version:	$Id: ip_vs_lblcr.c,v 1.11 2002/09/15 08:14:08 wensong Exp $
 *
 * Authors:	Wensong Zhang <wensong@gnuchina.org>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Changes:
 *     Julian Anastasov	:    Added the missing (dest->weight>0)
 *			     condition in the ip_vs_dest_set_max.
 *
 */
/*
 * The lblc/r algorithm is as follows (pseudo code):
 *
 *       if serverSet[dest_ip] is null then
 *               n, serverSet[dest_ip] <- {weighted least-conn node};
 *       else
 *               n <- {least-conn (alive) node in serverSet[dest_ip]};
 *               if (n is null) OR
 *                  (n.conns>n.weight AND
 *                   there is a node m with m.conns<m.weight/2) then
 *                   n <- {weighted least-conn node};
 *                   add n to serverSet[dest_ip];
 *               if |serverSet[dest_ip]| > 1 AND
 *                   now - serverSet[dest_ip].lastMod > T then
 *                   m <- {most conn node in serverSet[dest_ip]};
 *                   remove m from serverSet[dest_ip];
 *       if serverSet[dest_ip] changed then
 *               serverSet[dest_ip].lastMod <- now;
 *
 *       return n;
 */
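/*
 * How the pseudo code maps onto this file: serverSet[dest_ip] is a
 * struct ip_vs_dest_set stored in an ip_vs_lblcr_entry hashed by
 * destination IP; the "least-conn (alive) node in serverSet" lookup is
 * ip_vs_dest_set_min(); the "most conn node" used to shrink the set is
 * ip_vs_dest_set_max(); and the "weighted least-conn node" over all of
 * the service's servers is __ip_vs_wlc_schedule().
 */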
#include <linux/ip.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>

/* for sysctl */
#include <linux/sysctl.h>
/* for proc_net_create/proc_net_remove */
#include <linux/proc_fs.h>

#include <net/ip_vs.h>
/*
 *    It is for garbage collection of stale IPVS lblcr entries,
 *    when the table is full.
 */
#define CHECK_EXPIRE_INTERVAL   (60*HZ)
#define ENTRY_TIMEOUT           (6*60*HZ)
/*
 *    It is for full expiration check.
 *    When there is no partial expiration check (garbage collection)
 *    in a half hour, do a full expiration check to collect stale
 *    entries that haven't been touched for a day.
 */
#define COUNT_FOR_FULL_EXPIRATION   30
static int sysctl_ip_vs_lblcr_expiration = 24*60*60*HZ;
/*
 *     for IPVS lblcr entry hash table
 */
#ifndef CONFIG_IP_VS_LBLCR_TAB_BITS
#define CONFIG_IP_VS_LBLCR_TAB_BITS  10
#endif
#define IP_VS_LBLCR_TAB_BITS     CONFIG_IP_VS_LBLCR_TAB_BITS
#define IP_VS_LBLCR_TAB_SIZE     (1 << IP_VS_LBLCR_TAB_BITS)
#define IP_VS_LBLCR_TAB_MASK     (IP_VS_LBLCR_TAB_SIZE - 1)
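/*
 * For reference: with the default of 10 bits the table has
 * 1 << 10 = 1024 buckets and IP_VS_LBLCR_TAB_MASK is 0x3ff, so a hash
 * key is reduced to a bucket index by keeping only the low 10 bits.
 */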
/*
 *      IPVS destination set structure and operations
 */
struct ip_vs_dest_list {
	struct ip_vs_dest_list  *next;          /* list link */
	struct ip_vs_dest       *dest;          /* destination server */
};

struct ip_vs_dest_set {
	atomic_t                size;           /* set size */
	unsigned long           lastmod;        /* last modified time */
	struct ip_vs_dest_list  *list;          /* destination list */
	rwlock_t                lock;           /* lock for this list */
};
static struct ip_vs_dest_list *
ip_vs_dest_set_insert(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
{
	struct ip_vs_dest_list *e;

	for (e=set->list; e!=NULL; e=e->next) {
		if (e->dest == dest)
			/* already existed */
			return NULL;
	}

	e = kmalloc(sizeof(struct ip_vs_dest_list), GFP_ATOMIC);
	if (e == NULL) {
		IP_VS_ERR("ip_vs_dest_set_insert(): no memory\n");
		return NULL;
	}
	atomic_inc(&dest->refcnt);
	e->dest = dest;

	/* link it to the list */
	write_lock(&set->lock);
	e->next = set->list;
	set->list = e;
	atomic_inc(&set->size);
	write_unlock(&set->lock);
	set->lastmod = jiffies;
	return e;
}
static void
ip_vs_dest_set_erase(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
{
	struct ip_vs_dest_list *e, **ep;

	write_lock(&set->lock);
	for (ep=&set->list, e=*ep; e!=NULL; e=*ep) {
		if (e->dest == dest) {
			/* HIT: unlink and release this element */
			*ep = e->next;
			atomic_dec(&set->size);
			set->lastmod = jiffies;
			atomic_dec(&e->dest->refcnt);
			kfree(e);
			break;
		}
		ep = &e->next;
	}
	write_unlock(&set->lock);
}
static void ip_vs_dest_set_eraseall(struct ip_vs_dest_set *set)
{
	struct ip_vs_dest_list *e, **ep;

	write_lock(&set->lock);
	for (ep=&set->list, e=*ep; e!=NULL; e=*ep) {
		*ep = e->next;
		/*
		 * We don't kfree dest because it is referred either
		 * by its service or by the trash dest list.
		 */
		atomic_dec(&e->dest->refcnt);
		kfree(e);
	}
	write_unlock(&set->lock);
}
/* get weighted least-connection node in the destination set */
static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
{
	register struct ip_vs_dest_list *e;
	struct ip_vs_dest *dest, *least;
	int loh, doh;

	if (set == NULL)
		return NULL;

	read_lock(&set->lock);
	/* select the first destination server, whose weight > 0 */
	for (e=set->list; e!=NULL; e=e->next) {
		least = e->dest;
		if (least->flags & IP_VS_DEST_F_OVERLOAD)
			continue;

		if ((atomic_read(&least->weight) > 0)
		    && (least->flags & IP_VS_DEST_F_AVAILABLE)) {
			loh = atomic_read(&least->activeconns) * 50
				+ atomic_read(&least->inactconns);
			goto nextstage;
		}
	}
	read_unlock(&set->lock);
	return NULL;

	/* find the destination with the weighted least load */
  nextstage:
	for (e=e->next; e!=NULL; e=e->next) {
		dest = e->dest;
		if (dest->flags & IP_VS_DEST_F_OVERLOAD)
			continue;

		doh = atomic_read(&dest->activeconns) * 50
			+ atomic_read(&dest->inactconns);
		if ((loh * atomic_read(&dest->weight) >
		     doh * atomic_read(&least->weight))
		    && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
			least = dest;
			loh = doh;
		}
	}
	read_unlock(&set->lock);

	IP_VS_DBG(6, "ip_vs_dest_set_min: server %d.%d.%d.%d:%d "
		  "activeconns %d refcnt %d weight %d overhead %d\n",
		  NIPQUAD(least->addr), ntohs(least->port),
		  atomic_read(&least->activeconns),
		  atomic_read(&least->refcnt),
		  atomic_read(&least->weight), loh);
	return least;
}
/* get weighted most-connection node in the destination set */
static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
{
	register struct ip_vs_dest_list *e;
	struct ip_vs_dest *dest, *most;
	int moh, doh;

	if (set == NULL)
		return NULL;

	read_lock(&set->lock);
	/* select the first destination server, whose weight > 0 */
	for (e=set->list; e!=NULL; e=e->next) {
		most = e->dest;
		if (atomic_read(&most->weight) > 0) {
			moh = atomic_read(&most->activeconns) * 50
				+ atomic_read(&most->inactconns);
			goto nextstage;
		}
	}
	read_unlock(&set->lock);
	return NULL;

	/* find the destination with the weighted most load */
  nextstage:
	for (e=e->next; e!=NULL; e=e->next) {
		dest = e->dest;
		doh = atomic_read(&dest->activeconns) * 50
			+ atomic_read(&dest->inactconns);
		/* moh/mw < doh/dw ==> moh*dw < doh*mw, where mw,dw>0 */
		if ((moh * atomic_read(&dest->weight) <
		     doh * atomic_read(&most->weight))
		    && (atomic_read(&dest->weight) > 0)) {
			most = dest;
			moh = doh;
		}
	}
	read_unlock(&set->lock);

	IP_VS_DBG(6, "ip_vs_dest_set_max: server %d.%d.%d.%d:%d "
		  "activeconns %d refcnt %d weight %d overhead %d\n",
		  NIPQUAD(most->addr), ntohs(most->port),
		  atomic_read(&most->activeconns),
		  atomic_read(&most->refcnt),
		  atomic_read(&most->weight), moh);
	return most;
}
/*
 *      IPVS lblcr entry represents an association between destination
 *      IP address and its destination server set
 */
struct ip_vs_lblcr_entry {
	struct list_head        list;
	__u32                   addr;           /* destination IP address */
	struct ip_vs_dest_set   set;            /* destination server set */
	unsigned long           lastuse;        /* last used time */
};
/*
 *      IPVS lblcr hash table
 */
struct ip_vs_lblcr_table {
	rwlock_t                lock;           /* lock for this table */
	struct list_head        bucket[IP_VS_LBLCR_TAB_SIZE];  /* hash bucket */
	atomic_t                entries;        /* number of entries */
	int                     max_size;       /* maximum size of entries */
	struct timer_list       periodic_timer; /* collect stale entries */
	int                     rover;          /* rover for expire check */
	int                     counter;        /* counter for no expire */
};
/*
 *      IPVS LBLCR sysctl table
 */
static ctl_table vs_vars_table[] = {
	{
		.ctl_name	= NET_IPV4_VS_LBLCR_EXPIRE,
		.procname	= "lblcr_expiration",
		.data		= &sysctl_ip_vs_lblcr_expiration,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_jiffies,
	},
	{ .ctl_name = 0 }
};

static ctl_table vs_table[] = {
	{ .ctl_name = NET_IPV4_VS, .procname = "vs",
	  .mode = 0555, .child = vs_vars_table },
	{ .ctl_name = 0 }
};

static ctl_table ipvs_ipv4_table[] = {
	{ .ctl_name = NET_IPV4, .procname = "ipv4",
	  .mode = 0555, .child = vs_table },
	{ .ctl_name = 0 }
};

static ctl_table lblcr_root_table[] = {
	{ .ctl_name = CTL_NET, .procname = "net",
	  .mode = 0555, .child = ipvs_ipv4_table },
	{ .ctl_name = 0 }
};

static struct ctl_table_header * sysctl_header;
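/*
 * The expiration period above is tunable at run time. A usage sketch,
 * assuming the standard sysctl paths registered by the tables above
 * (the value is an assumption, not from the original source):
 *
 *     echo 3600 > /proc/sys/net/ipv4/vs/lblcr_expiration
 *
 * proc_dointvec_jiffies converts between seconds in userspace and
 * jiffies in the kernel, so this would set a one-hour expiration.
 */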
/*
 *      new/free a ip_vs_lblcr_entry, which is a mapping of a destination
 *      IP address to a server.
 */
static inline struct ip_vs_lblcr_entry *ip_vs_lblcr_new(__u32 daddr)
{
	struct ip_vs_lblcr_entry *en;

	en = kmalloc(sizeof(struct ip_vs_lblcr_entry), GFP_ATOMIC);
	if (en == NULL) {
		IP_VS_ERR("ip_vs_lblcr_new(): no memory\n");
		return NULL;
	}
	INIT_LIST_HEAD(&en->list);
	en->addr = daddr;

	/* initialize its dest set */
	atomic_set(&(en->set.size), 0);
	en->set.list = NULL;
	rwlock_init(&en->set.lock);

	return en;
}
static inline void ip_vs_lblcr_free(struct ip_vs_lblcr_entry *en)
{
	list_del(&en->list);
	ip_vs_dest_set_eraseall(&en->set);
	kfree(en);
}
/*
 *	Returns hash value for IPVS LBLCR entry
 */
static inline unsigned ip_vs_lblcr_hashkey(__u32 addr)
{
	return (ntohl(addr)*2654435761UL) & IP_VS_LBLCR_TAB_MASK;
}
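/*
 * A note on the constant above: 2654435761 (0x9e3779b1) is a prime
 * close to 2^32 divided by the golden ratio, so consecutive addresses
 * are scattered widely before the mask keeps the low
 * IP_VS_LBLCR_TAB_BITS bits. Worked example (hand-checked arithmetic,
 * not from the original source): for ntohl(addr) == 1 the product is
 * 0x9e3779b1, and masking with 0x3ff yields bucket 0x1b1 == 433.
 */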
/*
 *	Hash an entry in the ip_vs_lblcr_table.
 *	returns bool success.
 */
static int
ip_vs_lblcr_hash(struct ip_vs_lblcr_table *tbl, struct ip_vs_lblcr_entry *en)
{
	unsigned hash;

	if (!list_empty(&en->list)) {
		IP_VS_ERR("ip_vs_lblcr_hash(): request for already hashed, "
			  "called from %p\n", __builtin_return_address(0));
		return 0;
	}

	/*
	 *	Hash by destination IP address
	 */
	hash = ip_vs_lblcr_hashkey(en->addr);

	write_lock(&tbl->lock);
	list_add(&en->list, &tbl->bucket[hash]);
	atomic_inc(&tbl->entries);
	write_unlock(&tbl->lock);

	return 1;
}
/*
 *  Get ip_vs_lblcr_entry associated with supplied parameters.
 */
static inline struct ip_vs_lblcr_entry *
ip_vs_lblcr_get(struct ip_vs_lblcr_table *tbl, __u32 addr)
{
	unsigned hash;
	struct ip_vs_lblcr_entry *en;

	hash = ip_vs_lblcr_hashkey(addr);

	read_lock(&tbl->lock);
	list_for_each_entry(en, &tbl->bucket[hash], list) {
		if (en->addr == addr) {
			/* HIT */
			read_unlock(&tbl->lock);
			return en;
		}
	}
	read_unlock(&tbl->lock);

	return NULL;
}
/*
 *      Flush all the entries of the specified table.
 */
static void ip_vs_lblcr_flush(struct ip_vs_lblcr_table *tbl)
{
	int i;
	struct ip_vs_lblcr_entry *en, *nxt;

	for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) {
		write_lock(&tbl->lock);
		list_for_each_entry_safe(en, nxt, &tbl->bucket[i], list) {
			ip_vs_lblcr_free(en);
			atomic_dec(&tbl->entries);
		}
		write_unlock(&tbl->lock);
	}
}
static inline void ip_vs_lblcr_full_check(struct ip_vs_lblcr_table *tbl)
{
	unsigned long now = jiffies;
	int i, j;
	struct ip_vs_lblcr_entry *en, *nxt;

	for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) {
		j = (j + 1) & IP_VS_LBLCR_TAB_MASK;

		write_lock(&tbl->lock);
		list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
			if (time_after(en->lastuse+sysctl_ip_vs_lblcr_expiration,
				       now))
				continue;

			ip_vs_lblcr_free(en);
			atomic_dec(&tbl->entries);
		}
		write_unlock(&tbl->lock);
	}
	tbl->rover = j;
}
/*
 *      Periodical timer handler for IPVS lblcr table
 *      It is used to collect stale entries when the number of entries
 *      exceeds the maximum size of the table.
 *
 *      Fixme: we probably need a more complicated algorithm to collect
 *             entries that have not been used for a long time even
 *             if the number of entries doesn't exceed the maximum size
 *             of the table.
 *      The full expiration check is for this purpose now.
 */
static void ip_vs_lblcr_check_expire(unsigned long data)
{
	struct ip_vs_lblcr_table *tbl;
	unsigned long now = jiffies;
	int goal;
	int i, j;
	struct ip_vs_lblcr_entry *en, *nxt;

	tbl = (struct ip_vs_lblcr_table *)data;

	if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) {
		/* do full expiration check */
		ip_vs_lblcr_full_check(tbl);
		tbl->counter = 1;
		goto out;
	}

	if (atomic_read(&tbl->entries) <= tbl->max_size) {
		tbl->counter++;
		goto out;
	}

	goal = (atomic_read(&tbl->entries) - tbl->max_size)*4/3;
	if (goal > tbl->max_size/2)
		goal = tbl->max_size/2;
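	/*
	 * Worked example of the eviction goal (arithmetic only, the
	 * numbers are illustrative): with max_size 16384 and 20000
	 * entries, goal is (20000-16384)*4/3 = 4821, slightly more than
	 * the overshoot; the cap of max_size/2 = 8192 bounds the work
	 * done in one timer tick.
	 */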
	for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) {
		j = (j + 1) & IP_VS_LBLCR_TAB_MASK;

		write_lock(&tbl->lock);
		list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
			if (time_before(now, en->lastuse+ENTRY_TIMEOUT))
				continue;

			ip_vs_lblcr_free(en);
			atomic_dec(&tbl->entries);
			goal--;
		}
		write_unlock(&tbl->lock);
		if (goal <= 0)
			break;
	}
	tbl->rover = j;

  out:
	mod_timer(&tbl->periodic_timer, jiffies+CHECK_EXPIRE_INTERVAL);
}
#ifdef CONFIG_IP_VS_LBLCR_DEBUG
static struct ip_vs_lblcr_table *lblcr_table_list;

/*
 *	/proc/net/ip_vs_lblcr to display the mappings of
 *	destination IP address <==> its serverSet
 */
static int
ip_vs_lblcr_getinfo(char *buffer, char **start, off_t offset, int length)
{
	off_t pos=0, begin;
	int len=0, size;
	struct ip_vs_lblcr_table *tbl;
	unsigned long now = jiffies;
	int i;
	struct ip_vs_lblcr_entry *en;

	tbl = lblcr_table_list;

	size = sprintf(buffer, "LastTime Dest IP address  Server set\n");
	pos += size;
	len += size;

	for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) {
		read_lock_bh(&tbl->lock);
		list_for_each_entry(en, &tbl->bucket[i], list) {
			char tbuf[16];
			struct ip_vs_dest_list *d;

			sprintf(tbuf, "%u.%u.%u.%u", NIPQUAD(en->addr));
			size = sprintf(buffer+len, "%8lu %-16s ",
				       now-en->lastuse, tbuf);

			read_lock(&en->set.lock);
			for (d=en->set.list; d!=NULL; d=d->next) {
				size += sprintf(buffer+len+size,
						"%u.%u.%u.%u ",
						NIPQUAD(d->dest->addr));
			}
			read_unlock(&en->set.lock);
			size += sprintf(buffer+len+size, "\n");

			len += size;
			pos += size;
			if (pos <= offset)
				len = 0;
			if (pos >= offset+length) {
				read_unlock_bh(&tbl->lock);
				goto done;
			}
		}
		read_unlock_bh(&tbl->lock);
	}

  done:
	begin = len - (pos - offset);
	*start = buffer + begin;
	len -= begin;
	if (len > length)
		len = length;
	return len;
}
#endif
static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc)
{
	int i;
	struct ip_vs_lblcr_table *tbl;

	/*
	 *    Allocate the ip_vs_lblcr_table for this service
	 */
	tbl = kmalloc(sizeof(struct ip_vs_lblcr_table), GFP_ATOMIC);
	if (tbl == NULL) {
		IP_VS_ERR("ip_vs_lblcr_init_svc(): no memory\n");
		return -ENOMEM;
	}
	svc->sched_data = tbl;
	IP_VS_DBG(6, "LBLCR hash table (memory=%Zdbytes) allocated for "
		  "current service\n",
		  sizeof(struct ip_vs_lblcr_table));

	/*
	 *    Initialize the hash buckets
	 */
	for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) {
		INIT_LIST_HEAD(&tbl->bucket[i]);
	}
	rwlock_init(&tbl->lock);
	tbl->max_size = IP_VS_LBLCR_TAB_SIZE*16;
	tbl->rover = 0;
	tbl->counter = 1;

	/*
	 *    Hook periodic timer for garbage collection
	 */
	init_timer(&tbl->periodic_timer);
	tbl->periodic_timer.data = (unsigned long)tbl;
	tbl->periodic_timer.function = ip_vs_lblcr_check_expire;
	tbl->periodic_timer.expires = jiffies+CHECK_EXPIRE_INTERVAL;
	add_timer(&tbl->periodic_timer);

#ifdef CONFIG_IP_VS_LBLCR_DEBUG
	lblcr_table_list = tbl;
#endif
	return 0;
}
static int ip_vs_lblcr_done_svc(struct ip_vs_service *svc)
{
	struct ip_vs_lblcr_table *tbl = svc->sched_data;

	/* remove periodic timer */
	del_timer_sync(&tbl->periodic_timer);

	/* got to clean up table entries here */
	ip_vs_lblcr_flush(tbl);

	/* release the table itself */
	kfree(svc->sched_data);
	IP_VS_DBG(6, "LBLCR hash table (memory=%Zdbytes) released\n",
		  sizeof(struct ip_vs_lblcr_table));

	return 0;
}
static int ip_vs_lblcr_update_svc(struct ip_vs_service *svc)
{
	return 0;
}
static inline struct ip_vs_dest *
__ip_vs_wlc_schedule(struct ip_vs_service *svc, struct iphdr *iph)
{
	struct ip_vs_dest *dest, *least;
	int loh, doh;

	/*
	 * We think the overhead of processing active connections is fifty
	 * times higher than that of inactive connections in average. (This
	 * fifty times might not be accurate, we will change it later.) We
	 * use the following formula to estimate the overhead:
	 *                dest->activeconns*50 + dest->inactconns
	 * and the load:
	 *                (dest overhead) / dest->weight
	 *
	 * Remember -- no floats in kernel mode!!!
	 * The comparison of h1*w2 > h2*w1 is equivalent to that of
	 *                h1/w1 > h2/w2
	 * if every weight is larger than zero.
	 *
	 * The server with weight=0 is quiesced and will not receive any
	 * new connections.
	 */
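	/*
	 * Worked example of the integer comparison (illustrative numbers,
	 * not from the original source): server A with 10 active and 20
	 * inactive connections has overhead 10*50+20 = 520; at weight 2
	 * its load is 520/2 = 260. Server B with overhead 1000 and weight
	 * 5 has load 200. Cross-multiplying, 520*5 = 2600 > 1000*2 = 2000,
	 * so B correctly wins without any division.
	 */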
	list_for_each_entry(dest, &svc->destinations, n_list) {
		if (dest->flags & IP_VS_DEST_F_OVERLOAD)
			continue;

		if (atomic_read(&dest->weight) > 0) {
			least = dest;
			loh = atomic_read(&least->activeconns) * 50
				+ atomic_read(&least->inactconns);
			goto nextstage;
		}
	}
	return NULL;

	/*
	 *    Find the destination with the least load.
	 */
  nextstage:
	list_for_each_entry_continue(dest, &svc->destinations, n_list) {
		if (dest->flags & IP_VS_DEST_F_OVERLOAD)
			continue;

		doh = atomic_read(&dest->activeconns) * 50
			+ atomic_read(&dest->inactconns);
		if (loh * atomic_read(&dest->weight) >
		    doh * atomic_read(&least->weight)) {
			least = dest;
			loh = doh;
		}
	}

	IP_VS_DBG(6, "LBLCR: server %d.%d.%d.%d:%d "
		  "activeconns %d refcnt %d weight %d overhead %d\n",
		  NIPQUAD(least->addr), ntohs(least->port),
		  atomic_read(&least->activeconns),
		  atomic_read(&least->refcnt),
		  atomic_read(&least->weight), loh);

	return least;
}
/*
 *   If this destination server is overloaded and there is a less loaded
 *   server, then return true.
 */
static inline int
is_overloaded(struct ip_vs_dest *dest, struct ip_vs_service *svc)
{
	if (atomic_read(&dest->activeconns) > atomic_read(&dest->weight)) {
		struct ip_vs_dest *d;

		list_for_each_entry(d, &svc->destinations, n_list) {
			if (atomic_read(&d->activeconns)*2
			    < atomic_read(&d->weight)) {
				return 1;
			}
		}
	}
	return 0;
}
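/*
 * Example of the overload test above (illustrative numbers): a server
 * with weight 3 and 5 active connections exceeds its weight, and if
 * some peer has 1 active connection against weight 4 (1*2 < 4), the
 * server set is expanded with the weighted least-connection pick. This
 * matches the "n.conns>n.weight AND m.conns<m.weight/2" clause in the
 * pseudo code at the top of the file.
 */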
/*
 *    Locality-Based (weighted) Least-Connection with Replication scheduling
 */
static struct ip_vs_dest *
ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
{
	struct ip_vs_dest *dest;
	struct ip_vs_lblcr_table *tbl;
	struct ip_vs_lblcr_entry *en;
	struct iphdr *iph = skb->nh.iph;

	IP_VS_DBG(6, "ip_vs_lblcr_schedule(): Scheduling...\n");

	tbl = (struct ip_vs_lblcr_table *)svc->sched_data;
	en = ip_vs_lblcr_get(tbl, iph->daddr);
	if (en == NULL) {
		dest = __ip_vs_wlc_schedule(svc, iph);
		if (dest == NULL) {
			IP_VS_DBG(1, "no destination available\n");
			return NULL;
		}
		en = ip_vs_lblcr_new(iph->daddr);
		if (en == NULL)
			return NULL;
		ip_vs_dest_set_insert(&en->set, dest);
		ip_vs_lblcr_hash(tbl, en);
	} else {
		dest = ip_vs_dest_set_min(&en->set);
		if (!dest || is_overloaded(dest, svc)) {
			dest = __ip_vs_wlc_schedule(svc, iph);
			if (dest == NULL) {
				IP_VS_DBG(1, "no destination available\n");
				return NULL;
			}
			ip_vs_dest_set_insert(&en->set, dest);
		}
		if (atomic_read(&en->set.size) > 1 &&
		    jiffies-en->set.lastmod > sysctl_ip_vs_lblcr_expiration) {
			struct ip_vs_dest *m;
			m = ip_vs_dest_set_max(&en->set);
			if (m)
				ip_vs_dest_set_erase(&en->set, m);
		}
	}
	en->lastuse = jiffies;

	IP_VS_DBG(6, "LBLCR: destination IP address %u.%u.%u.%u "
		  "--> server %u.%u.%u.%u:%d\n",
		  NIPQUAD(en->addr),
		  NIPQUAD(dest->addr),
		  ntohs(dest->port));

	return dest;
}
/*
 *      IPVS LBLCR Scheduler structure
 */
static struct ip_vs_scheduler ip_vs_lblcr_scheduler =
{
	.name =			"lblcr",
	.refcnt =		ATOMIC_INIT(0),
	.module =		THIS_MODULE,
	.init_service =		ip_vs_lblcr_init_svc,
	.done_service =		ip_vs_lblcr_done_svc,
	.update_service =	ip_vs_lblcr_update_svc,
	.schedule =		ip_vs_lblcr_schedule,
};
static int __init ip_vs_lblcr_init(void)
{
	INIT_LIST_HEAD(&ip_vs_lblcr_scheduler.n_list);
	sysctl_header = register_sysctl_table(lblcr_root_table, 0);
#ifdef CONFIG_IP_VS_LBLCR_DEBUG
	proc_net_create("ip_vs_lblcr", 0, ip_vs_lblcr_getinfo);
#endif
	return register_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
}
static void __exit ip_vs_lblcr_cleanup(void)
{
#ifdef CONFIG_IP_VS_LBLCR_DEBUG
	proc_net_remove("ip_vs_lblcr");
#endif
	unregister_sysctl_table(sysctl_header);
	unregister_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
}


module_init(ip_vs_lblcr_init);
module_exit(ip_vs_lblcr_cleanup);
MODULE_LICENSE("GPL");