/*-
 * Copyright (c) 2006 Kip Macy
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#ifdef DEBUG
#include <sys/kdb.h>
#endif
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

#include <machine/cpufunc.h>
#include <machine/hypervisorvar.h>
#include <machine/smp.h>
#include <machine/mmu.h>
#include <machine/tte.h>
#include <machine/vmparam.h>
#include <machine/tlb.h>
#include <machine/tte_hash.h>

#define HASH_SIZE        (1 << HASH_ENTRY_SHIFT)
#define HASH_MASK(th)    ((1<<(th->th_shift+PAGE_SHIFT-THE_SHIFT))-1)
#define NULL_TAG         0
#define MAGIC_VALUE      0xcafebabe

struct tte_hash_entry;
struct of_field;

#define MAX_FRAGMENT_ENTRIES ((PAGE_SIZE / sizeof(struct tte_hash_entry)) - 1)

typedef struct tte_hash_field_ {
        uint64_t tag;
        uint64_t data;
} tte_hash_field, *tte_hash_field_t;

struct of_field {
        int16_t          count;
        uint8_t          lock;
        uint8_t          pad;
        uint32_t         flags;
        struct tte_hash_entry *next;
};

typedef struct tte_hash_entry {
        tte_hash_field the_fields[HASH_ENTRIES];
        struct of_field of;
} *tte_hash_entry_t;

struct fragment_header {
        struct tte_hash_fragment *fh_next;
        uint8_t fh_count;
        uint8_t fh_free_head;
        uint8_t pad[sizeof(struct tte_hash_entry) - 10];
};

CTASSERT(sizeof(struct fragment_header) == sizeof(struct tte_hash_entry));

SLIST_HEAD(tte_hash_list, tte_hash);

struct tte_hash_list hash_free_list[PAGE_SHIFT];

struct tte_hash {
        uint16_t th_shift;              /* log2 of table size in pages */
        uint16_t th_context;            /* TLB context   */
        uint32_t th_entries;            /* # mappings held */
        tte_hash_entry_t th_hashtable;  /* hash of TTEs  */
        struct tte_hash_fragment *th_fhhead;
        struct tte_hash_fragment *th_fhtail;
        SLIST_ENTRY(tte_hash) th_next;
};

struct tte_hash_fragment {
        struct fragment_header thf_head;
        struct tte_hash_entry  thf_entries[MAX_FRAGMENT_ENTRIES];
};

CTASSERT(sizeof(struct tte_hash_fragment) == PAGE_SIZE);

static struct tte_hash kernel_tte_hash;
/*
 * Data for the tte_hash allocation mechanism
 */
static uma_zone_t thzone;
static struct vm_object thzone_obj;
static int tte_hash_count = 0, tte_hash_max = 0;

extern uint64_t hash_bucket_lock(tte_hash_field_t fields);
extern void hash_bucket_unlock(tte_hash_field_t fields, uint64_t s);

static tte_hash_t
get_tte_hash(void)
{
        tte_hash_t th;

        th = uma_zalloc(thzone, M_NOWAIT);

        KASSERT(th != NULL, ("tte_hash allocation failed"));
        tte_hash_count++;
        return (th);
}

static __inline void
free_tte_hash(tte_hash_t th)
{
        tte_hash_count--;
        uma_zfree(thzone, th);
}

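/*
 * Retired hash tables are kept on per-size free lists so that a table of
 * the required size can be reused without reallocating its pages.
 */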
static tte_hash_t
tte_hash_cached_get(int shift)
{
        tte_hash_t th;
        struct tte_hash_list *head;

        th = NULL;
        head = &hash_free_list[shift];
        if (!SLIST_EMPTY(head)) {
                th = SLIST_FIRST(head);
                SLIST_REMOVE_HEAD(head, th_next);
        }
        return (th);
}

static void
tte_hash_cached_free(tte_hash_t th)
{
        th->th_context = 0xffff;
        SLIST_INSERT_HEAD(&hash_free_list[th->th_shift - HASH_ENTRY_SHIFT], th, th_next);
}

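/*
 * Create the zone used for tte_hash descriptors and initialize the
 * per-size lists of cached hash tables.
 */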
void
tte_hash_init(void)
{
        int i;

        thzone = uma_zcreate("TTE_HASH", sizeof(struct tte_hash), NULL, NULL,
            NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
        tte_hash_max = maxproc;
        uma_zone_set_obj(thzone, &thzone_obj, tte_hash_max);
        for (i = 0; i < PAGE_SHIFT; i++)
                SLIST_INIT(&hash_free_list[i]);
}

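/*
 * Set up the statically allocated kernel hash, using the preallocated
 * virtual range at va for the table and fragment_page for the first
 * overflow fragment.
 */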
tte_hash_t
tte_hash_kernel_create(vm_offset_t va, uint16_t shift, vm_paddr_t fragment_page)
{
        tte_hash_t th;

        th = &kernel_tte_hash;
        th->th_shift = shift;
        th->th_entries = 0;
        th->th_context = 0;
        th->th_hashtable = (tte_hash_entry_t)va;
        th->th_fhtail = th->th_fhhead = (void *)TLB_PHYS_TO_DIRECT(fragment_page);

        return (th);
}

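/*
 * Allocate a wired, zeroed page and return its address in the direct map,
 * waiting for memory if none is immediately available.
 */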
static inline void *
alloc_zeroed_page(void)
{
        vm_page_t m;
        static int color;
        void *ptr;

        m = NULL;

        while (m == NULL) {
                m = vm_page_alloc(NULL, color++,
                    VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
                    VM_ALLOC_ZERO);

                if (m == NULL)
                        VM_WAIT;
        }

        if ((m->flags & PG_ZERO) == 0)
                pmap_zero_page(m);

        ptr = (void *)TLB_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(m));
        return (ptr);
}

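/*
 * Unwire and free a chain of overflow fragment pages.
 */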
static inline void
free_fragment_pages(void *ptr)
{
        struct tte_hash_fragment *fh, *fhnext;
        vm_page_t m;

        for (fh = ptr; fh != NULL; fh = fhnext) {
                /* fetch the link before freeing the page it lives in */
                fhnext = fh->thf_head.fh_next;
                m = PHYS_TO_VM_PAGE(TLB_DIRECT_TO_PHYS((vm_offset_t)fh));
                m->wire_count--;
                atomic_subtract_int(&cnt.v_wire_count, 1);
                vm_page_free(m);
        }
}

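/*
 * Allocate a hash descriptor, a physically contiguous table of (1 << shift)
 * pages and an initial fragment page.  If scratchval is non-NULL it receives
 * the table's base address ORed with its size in pages.
 */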
static inline tte_hash_t
_tte_hash_create(uint64_t context, uint64_t *scratchval, uint16_t shift)
{
        tte_hash_t th;

        th = get_tte_hash();
        th->th_shift = shift;
        th->th_entries = 0;
        th->th_context = (uint16_t)context;

        th->th_hashtable = pmap_alloc_zeroed_contig_pages((1 << shift), PAGE_SIZE);

        th->th_fhtail = th->th_fhhead = alloc_zeroed_page();
        KASSERT(th->th_fhtail != NULL, ("th->th_fhtail == NULL"));

        if (scratchval)
                *scratchval = (uint64_t)((vm_offset_t)th->th_hashtable) | ((vm_offset_t)(1 << shift));

        return (th);
}

tte_hash_t
tte_hash_create(uint64_t context, uint64_t *scratchval)
{
        return (_tte_hash_create(context, scratchval, HASH_ENTRY_SHIFT));
}

void
tte_hash_destroy(tte_hash_t th)
{
        tte_hash_cached_free(th);
}

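/*
 * Free all overflow fragment pages but the first, then clear both the
 * remaining fragment page and the hash table itself.
 */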
static void
_tte_hash_reset(tte_hash_t th)
{

        free_fragment_pages(th->th_fhhead->thf_head.fh_next);

        th->th_fhtail = th->th_fhhead;
        hwblkclr(th->th_fhhead, PAGE_SIZE);
#if 0
        if (th->th_entries != 0)
#endif
                hwblkclr(th->th_hashtable, (1 << (th->th_shift + PAGE_SHIFT)));
        th->th_entries = 0;
}

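/*
 * Reset a hash for reuse.  If the hash has grown past the default size and
 * a default-sized table is cached, swap it in, retire the old one and pass
 * the new scratchpad encoding back through scratchval.
 */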
tte_hash_t
tte_hash_reset(tte_hash_t th, uint64_t *scratchval)
{
        tte_hash_t newth;

        if (th->th_shift != HASH_ENTRY_SHIFT && (newth = tte_hash_cached_get(0)) != NULL) {
                newth->th_context = th->th_context;
                tte_hash_cached_free(th);
                *scratchval = (uint64_t)((vm_offset_t)newth->th_hashtable) | ((vm_offset_t)HASH_SIZE);
        } else {
                newth = th;
        }
        _tte_hash_reset(newth);

        return (newth);
}

static __inline void
tte_hash_set_field(tte_hash_field_t field, uint64_t tag, tte_t tte)
{
        field->tag = tag;
        field->data = tte | (field->data & VTD_LOCK);
}

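/*
 * Hash the virtual address into its bucket in the table.
 */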
static __inline tte_hash_entry_t
find_entry(tte_hash_t th, vm_offset_t va, int page_shift)
{
        uint64_t hash_index;

        hash_index = (va >> page_shift) & HASH_MASK(th);
        return (&th->th_hashtable[hash_index]);
}

static __inline tte_hash_entry_t
tte_hash_lookup_last_entry(tte_hash_entry_t entry)
{

        while (entry->of.next)
                entry = entry->of.next;

        return (entry);
}

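/*
 * Hand out the next unused overflow entry from the tail fragment page,
 * chaining on a freshly zeroed page when the current one is exhausted.
 */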
static tte_hash_entry_t
tte_hash_allocate_fragment_entry(tte_hash_t th)
{
        struct tte_hash_fragment *fh;
        tte_hash_entry_t newentry;

        fh = th->th_fhtail;
        if (fh->thf_head.fh_free_head == MAX_FRAGMENT_ENTRIES) {
                fh = th->th_fhtail = fh->thf_head.fh_next = alloc_zeroed_page();
                fh->thf_head.fh_free_head = 1;
#ifdef NOISY_DEBUG
                printf("new fh=%p \n", fh);
#endif
        }
        newentry = &fh->thf_entries[fh->thf_head.fh_free_head];

        fh->thf_head.fh_free_head++;
        fh->thf_head.fh_count++;

        return (newentry);
}

/*
 * If a match for the tag is found, the tte data is returned and
 * *field is left pointing at the matching field.
 */
static __inline tte_t
_tte_hash_lookup(tte_hash_entry_t entry, tte_t tte_tag, tte_hash_field_t *field)
{
        int i;
        tte_t tte_data;
        tte_hash_field_t fields;

        tte_data = 0;
        do {
                fields = entry->the_fields;
                for (i = 0; i < entry->of.count; i++) {
                        if (fields[i].tag == tte_tag) {
                                tte_data = (fields[i].data & ~VTD_LOCK);
                                *field = &fields[i];
                                goto done;
                        }
                }
#ifdef DEBUG
                if (entry->of.next && entry->of.flags != MAGIC_VALUE)
                        panic("overflow pointer not null without flags set entry= %p next=%p flags=0x%x count=%d",
                              entry, entry->of.next, entry->of.flags, entry->of.count);
#endif
                entry = entry->of.next;
        } while (entry);

done:
        return (tte_data);
}

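/*
 * Find the last in-use field of a bucket chain so that delete can move it
 * into the vacated slot; a trailing overflow entry that drains to a single
 * field is unlinked from the chain.
 */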
static __inline void
_tte_hash_lookup_last(tte_hash_entry_t entry, tte_hash_field_t *field)
{

        while (entry->of.next && (entry->of.next->of.count > 1))
                entry = entry->of.next;

        if (entry->of.next && entry->of.next->of.count == 1) {
                *field = &entry->of.next->the_fields[0];
                entry->of.next = NULL;
                entry->of.flags = 0;
        } else {
#ifdef DEBUG
                if (entry->of.count == 0)
                        panic("count zero");
#endif
                *field = &entry->the_fields[--entry->of.count];
        }
}

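/*
 * Clear the given bits in the tte data for va, returning the previous
 * data or 0 if no mapping exists.
 */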
tte_t
tte_hash_clear_bits(tte_hash_t th, vm_offset_t va, uint64_t flags)
{
        uint64_t s;
        tte_hash_entry_t entry;
        tte_t otte_data, tte_tag;
        tte_hash_field_t field = NULL;

        /* XXX - only handle 8K pages for now */
        entry = find_entry(th, va, PAGE_SHIFT);

        tte_tag = (((uint64_t)th->th_context << TTARGET_CTX_SHIFT)|(va >> TTARGET_VA_SHIFT));

        s = hash_bucket_lock(entry->the_fields);
        if ((otte_data = _tte_hash_lookup(entry, tte_tag, &field)) != 0)
                tte_hash_set_field(field, field->tag, field->data & ~flags);
        hash_bucket_unlock(entry->the_fields, s);
        return (otte_data);
}

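/*
 * Remove the mapping for va.  The bucket's last field is copied over the
 * vacated slot to keep the fields densely packed; returns the old tte data
 * or 0 if there was no mapping.
 */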
tte_t
tte_hash_delete(tte_hash_t th, vm_offset_t va)
{
        uint64_t s;
        tte_hash_entry_t entry;
        tte_t tte_data, tte_tag;
        tte_hash_field_t lookup_field = NULL;
        tte_hash_field_t last_field = NULL;

        /* XXX - only handle 8K pages for now */
        entry = find_entry(th, va, PAGE_SHIFT);

        tte_tag = (((uint64_t)th->th_context << TTARGET_CTX_SHIFT)|(va >> TTARGET_VA_SHIFT));

        s = hash_bucket_lock(entry->the_fields);

        if ((tte_data = _tte_hash_lookup(entry, tte_tag, &lookup_field)) == 0)
                goto done;

        _tte_hash_lookup_last(entry, &last_field);

#ifdef DEBUG
        if (last_field->tag == 0) {
                hash_bucket_unlock(entry->the_fields, s);
                panic("lookup_last failed for va=0x%lx\n", va);
        }
#endif
        /* move the last field's values into the field we are deleting */
        if (lookup_field != last_field)
                tte_hash_set_field(lookup_field, last_field->tag, last_field->data);

        tte_hash_set_field(last_field, 0, 0);
done:
        hash_bucket_unlock(entry->the_fields, s);
        if (tte_data)
                th->th_entries--;

        return (tte_data);
}

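/*
 * Append a (tag, data) pair to the last entry of a bucket chain; returns
 * -1 if that entry is already full.
 */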
static __inline int
tte_hash_insert_locked(tte_hash_t th, tte_hash_entry_t entry, uint64_t tte_tag, tte_t tte_data)
{
        tte_hash_entry_t lentry;

        lentry = tte_hash_lookup_last_entry(entry);

        if (lentry->of.count == HASH_ENTRIES)
                return (-1);
        tte_hash_set_field(&lentry->the_fields[lentry->of.count++],
                           tte_tag, tte_data);
        th->th_entries++;
        return (0);
}

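/*
 * Link a freshly allocated overflow entry onto the end of a bucket chain
 * and store the (tag, data) pair in it.
 */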
static __inline void
tte_hash_extend_locked(tte_hash_t th, tte_hash_entry_t entry, tte_hash_entry_t newentry, uint64_t tte_tag, tte_t tte_data)
{
        tte_hash_entry_t lentry;

        lentry = tte_hash_lookup_last_entry(entry);
        lentry->of.flags = MAGIC_VALUE;
        lentry->of.next = newentry;
        tte_hash_set_field(&newentry->the_fields[newentry->of.count++], tte_tag, tte_data);
        th->th_entries++;
}

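/*
 * Insert a new mapping for va.  If the bucket is full, an overflow entry
 * is allocated with the bucket unlocked and then linked in under the lock.
 */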
void
tte_hash_insert(tte_hash_t th, vm_offset_t va, tte_t tte_data)
{
        tte_hash_entry_t entry, newentry;
        tte_t tte_tag;
        uint64_t s;
        int retval;

#ifdef DEBUG
        if (tte_hash_lookup(th, va) != 0)
                panic("mapping for va=0x%lx already exists", va);
#endif
        entry = find_entry(th, va, PAGE_SHIFT); /* should actually be a function of tte_data */
        tte_tag = (((uint64_t)th->th_context << TTARGET_CTX_SHIFT)|(va >> TTARGET_VA_SHIFT));

        s = hash_bucket_lock(entry->the_fields);
        retval = tte_hash_insert_locked(th, entry, tte_tag, tte_data);
        hash_bucket_unlock(entry->the_fields, s);

        if (retval == -1) {
                newentry = tte_hash_allocate_fragment_entry(th);
                s = hash_bucket_lock(entry->the_fields);
                tte_hash_extend_locked(th, entry, newentry, tte_tag, tte_data);
                hash_bucket_unlock(entry->the_fields, s);
        }

#ifdef DEBUG
        if (tte_hash_lookup(th, va) == 0)
                panic("insert for va=0x%lx failed", va);
#endif
}

/*
 * Look up the tte data for va, returning 0 when no mapping exists; the
 * hash bucket is held locked only for the duration of the lookup.
 */
tte_t
tte_hash_lookup(tte_hash_t th, vm_offset_t va)
{
        uint64_t s;
        tte_hash_entry_t entry;
        tte_t tte_data, tte_tag;
        tte_hash_field_t field = NULL;

        /* XXX - only handle 8K pages for now */
        entry = find_entry(th, va, PAGE_SHIFT);

        tte_tag = (((uint64_t)th->th_context << TTARGET_CTX_SHIFT)|(va >> TTARGET_VA_SHIFT));

        s = hash_bucket_lock(entry->the_fields);
        tte_data = _tte_hash_lookup(entry, tte_tag, &field);
        hash_bucket_unlock(entry->the_fields, s);

        return (tte_data);
}

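/*
 * Publish the hash table's base address, ORed with its size in pages, in
 * the kernel (or, below, the user) hash scratchpad register.
 */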
uint64_t
tte_hash_set_scratchpad_kernel(tte_hash_t th)
{
        uint64_t hash_scratch;

        /* This breaks if a hash table grows above 32MB. */
        hash_scratch = ((vm_offset_t)th->th_hashtable) | ((vm_offset_t)(1<<th->th_shift));
        set_hash_kernel_scratchpad(hash_scratch);

        return (hash_scratch);
}

uint64_t
tte_hash_set_scratchpad_user(tte_hash_t th, uint64_t context)
{
        uint64_t hash_scratch;

        /* This breaks if a hash table grows above 32MB. */
        th->th_context = (uint16_t)context;
        hash_scratch = ((vm_offset_t)th->th_hashtable) | ((vm_offset_t)(1<<th->th_shift));
        set_hash_user_scratchpad(hash_scratch);

        return (hash_scratch);
}

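/*
 * Update the tte data for va if a mapping exists, otherwise insert a new
 * one; returns the previous data or 0 if there was none.
 */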
tte_t
tte_hash_update(tte_hash_t th, vm_offset_t va, tte_t tte_data)
{
        uint64_t s;
        tte_hash_entry_t entry;
        tte_t otte_data, tte_tag;
        tte_hash_field_t field = NULL;

        entry = find_entry(th, va, PAGE_SHIFT); /* should actually be a function of tte_data */

        tte_tag = (((uint64_t)th->th_context << TTARGET_CTX_SHIFT)|(va >> TTARGET_VA_SHIFT));
        s = hash_bucket_lock(entry->the_fields);
        otte_data = _tte_hash_lookup(entry, tte_tag, &field);

        if (otte_data == 0) {
                hash_bucket_unlock(entry->the_fields, s);
                tte_hash_insert(th, va, tte_data);
        } else {
                tte_hash_set_field(field, tte_tag, tte_data);
                hash_bucket_unlock(entry->the_fields, s);
        }
        return (otte_data);
}

/*
 * resize when the average entry has a full fragment entry
 */
int
tte_hash_needs_resize(tte_hash_t th)
{
        return ((th->th_entries > (1 << (th->th_shift + PAGE_SHIFT - TTE_SHIFT + 1)))
                && (th != &kernel_tte_hash));
}

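/*
 * Rehash every entry into a table twice the current size, recycling a
 * cached table of the right size when one is available.  The old hash is
 * left intact for the caller to dispose of.
 */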
tte_hash_t
tte_hash_resize(tte_hash_t th)
{
        int i, j, nentries;
        tte_hash_t newth;
        tte_hash_entry_t src_entry, dst_entry, newentry;

        KASSERT(th != &kernel_tte_hash, ("tte_hash_resize not supported for this pmap"));
        if ((newth = tte_hash_cached_get((th->th_shift - HASH_ENTRY_SHIFT) + 1)) != NULL) {
                newth->th_context = th->th_context;
                _tte_hash_reset(newth);
        } else {
                newth = _tte_hash_create(th->th_context, NULL, (th->th_shift + 1));
        }

        nentries = (1 << (th->th_shift + PAGE_SHIFT - THE_SHIFT));
        for (i = 0; i < nentries; i++) {
                tte_hash_field_t fields;
                src_entry = (&th->th_hashtable[i]);
                do {
                        fields = src_entry->the_fields;
                        for (j = 0; j < src_entry->of.count; j++) {
                                int shift = TTARGET_VA_SHIFT - PAGE_SHIFT;
                                uint64_t index = ((fields[j].tag<<shift) | (uint64_t)(i&((1<<shift)-1))) & HASH_MASK(newth);
                                dst_entry = &(newth->th_hashtable[index]);
                                if (tte_hash_insert_locked(newth, dst_entry, fields[j].tag, fields[j].data) == -1) {
                                        newentry = tte_hash_allocate_fragment_entry(newth);
                                        tte_hash_extend_locked(newth, dst_entry, newentry, fields[j].tag, fields[j].data);
                                }
                        }
                        src_entry = src_entry->of.next;
                } while (src_entry);
        }

        KASSERT(th->th_entries == newth->th_entries,
                ("not all entries copied old=%d new=%d", th->th_entries, newth->th_entries));

        return (newth);
}