runtime: copy runtime package time code from Go 1.7

Fix handling of function values for -fgo-c-header to generate FuncVal*,
    not simply FuncVal.
    
    While we're here, change runtime.nanotime to use clock_gettime with
    CLOCK_MONOTONIC, rather than gettimeofday.  This is what the gc library
    does.  It provides nanosecond precision and a monotonic clock.
    
    Reviewed-on: https://go-review.googlesource.com/31232

From-SVN: r241197
Ian Lance Taylor 2016-10-15 00:29:06 +00:00
parent 2a0b23da05
commit 35d9424444
12 changed files with 339 additions and 424 deletions

gcc/go/gofrontend/MERGE

@@ -1,4 +1,4 @@
-ec3dc927da71d15cac48a13c0fb0c1f94572d0d2
+880cb0a45590d992880fc6aabc7484e54c817eeb
 
 The first line of this file holds the git revision number of the last
 merge done from the gofrontend repository.

gcc/go/gofrontend/types.cc

@@ -5928,7 +5928,7 @@ Struct_type::write_field_to_c_header(std::ostream& os, const std::string& name,
       break;
     case TYPE_FUNCTION:
-      os << "FuncVal";
+      os << "FuncVal*";
       break;
     case TYPE_POINTER:
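
For context: gccgo represents a Go function value at run time as a pointer to a FuncVal closure descriptor, never as a FuncVal by value, so a function-typed struct field has to come out as a pointer in the header that -fgo-c-header generates. A minimal sketch of the effect (illustrative only, not part of the diff):

// Input to -fgo-c-header: a runtime struct with a function-typed field,
// e.g. the timer struct this commit adds to libgo/go/runtime/time.go.
type timer struct {
	f func(interface{}, uintptr)
}

// Field as emitted into the generated C header:
//   before the fix:  FuncVal f;    // wrong: declares the descriptor by value
//   after the fix:   FuncVal* f;   // right: matches the run-time representation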

libgo/Makefile.am

@@ -519,7 +519,6 @@ runtime_files = \
 	reflect.c \
 	runtime1.c \
 	sigqueue.c \
-	time.c \
 	$(runtime_getncpu_file)
 
 goc2c.$(OBJEXT): runtime/goc2c.c

libgo/Makefile.in

@@ -262,8 +262,7 @@ am__objects_6 = go-append.lo go-assert.lo go-assert-interface.lo \
 	$(am__objects_2) panic.lo parfor.lo print.lo proc.lo \
 	runtime.lo signal_unix.lo thread.lo $(am__objects_3) yield.lo \
 	$(am__objects_4) go-iface.lo lfstack.lo malloc.lo netpoll.lo \
-	rdebug.lo reflect.lo runtime1.lo sigqueue.lo time.lo \
-	$(am__objects_5)
+	rdebug.lo reflect.lo runtime1.lo sigqueue.lo $(am__objects_5)
 am_libgo_llgo_la_OBJECTS = $(am__objects_6)
 libgo_llgo_la_OBJECTS = $(am_libgo_llgo_la_OBJECTS)
 libgo_llgo_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \
@@ -918,7 +917,6 @@ runtime_files = \
 	reflect.c \
 	runtime1.c \
 	sigqueue.c \
-	time.c \
 	$(runtime_getncpu_file)
 
 noinst_DATA = zstdpkglist.go
@@ -1636,7 +1634,6 @@ distclean-compile:
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/thread-linux.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/thread-sema.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/thread.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/time.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/yield.Plo@am__quote@
 
 .c.o:

libgo/go/runtime/stubs.go

@@ -196,15 +196,15 @@ func getcallersp(argp unsafe.Pointer) uintptr
 // argp used in Defer structs when there is no argp.
 const _NoArgs = ^uintptr(0)
 
-// //go:linkname time_now time.now
-// func time_now() (sec int64, nsec int32)
+//go:linkname time_now time.now
+func time_now() (sec int64, nsec int32)
 
-/*
+// For gccgo, expose this for C callers.
+//go:linkname unixnanotime runtime.unixnanotime
 func unixnanotime() int64 {
 	sec, nsec := time_now()
 	return sec*1e9 + int64(nsec)
 }
-*/
 
 // round n up to a multiple of a. a must be a power of 2.
 func round(n, a uintptr) uintptr {
libgo/go/runtime/time.go (new file, 307 lines)

@@ -0,0 +1,307 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Time-related runtime and pieces of package time.
package runtime
import "unsafe"
// Export temporarily for gccgo's C code to call:
//go:linkname addtimer runtime.addtimer
//go:linkname deltimer runtime.deltimer
// Package time knows the layout of this structure.
// If this struct changes, adjust ../time/sleep.go:/runtimeTimer.
// For GOOS=nacl, package syscall knows the layout of this structure.
// If this struct changes, adjust ../syscall/net_nacl.go:/runtimeTimer.
type timer struct {
i int // heap index
// Timer wakes up at when, and then at when+period, ... (period > 0 only)
// each time calling f(arg, now) in the timer goroutine, so f must be
// a well-behaved function and not block.
when int64
period int64
f func(interface{}, uintptr)
arg interface{}
seq uintptr
}
var timers struct {
lock mutex
gp *g
created bool
sleeping bool
rescheduling bool
waitnote note
t []*timer
}
// nacl fake time support - time in nanoseconds since 1970
var faketime int64
// Package time APIs.
// Godoc uses the comments in package time, not these.
// time.now is implemented in assembly.
// timeSleep puts the current goroutine to sleep for at least ns nanoseconds.
//go:linkname timeSleep time.Sleep
func timeSleep(ns int64) {
if ns <= 0 {
return
}
t := new(timer)
t.when = nanotime() + ns
t.f = goroutineReady
t.arg = getg()
lock(&timers.lock)
addtimerLocked(t)
goparkunlock(&timers.lock, "sleep", traceEvGoSleep, 2)
}
// startTimer adds t to the timer heap.
//go:linkname startTimer time.startTimer
func startTimer(t *timer) {
if raceenabled {
racerelease(unsafe.Pointer(t))
}
addtimer(t)
}
// stopTimer removes t from the timer heap if it is there.
// It returns true if t was removed, false if t wasn't even there.
//go:linkname stopTimer time.stopTimer
func stopTimer(t *timer) bool {
return deltimer(t)
}
// Go runtime.
// Ready the goroutine arg.
func goroutineReady(arg interface{}, seq uintptr) {
goready(arg.(*g), 0)
}
func addtimer(t *timer) {
lock(&timers.lock)
addtimerLocked(t)
unlock(&timers.lock)
}
// Add a timer to the heap and start or kick the timer proc.
// If the new timer is earlier than any of the others.
// Timers are locked.
func addtimerLocked(t *timer) {
// when must never be negative; otherwise timerproc will overflow
// during its delta calculation and never expire other runtime·timers.
if t.when < 0 {
t.when = 1<<63 - 1
}
t.i = len(timers.t)
timers.t = append(timers.t, t)
siftupTimer(t.i)
if t.i == 0 {
// siftup moved to top: new earliest deadline.
if timers.sleeping {
timers.sleeping = false
notewakeup(&timers.waitnote)
}
if timers.rescheduling {
timers.rescheduling = false
goready(timers.gp, 0)
}
}
if !timers.created {
timers.created = true
go timerproc()
}
}
// Delete timer t from the heap.
// Do not need to update the timerproc: if it wakes up early, no big deal.
func deltimer(t *timer) bool {
// Dereference t so that any panic happens before the lock is held.
// Discard result, because t might be moving in the heap.
_ = t.i
lock(&timers.lock)
// t may not be registered anymore and may have
// a bogus i (typically 0, if generated by Go).
// Verify it before proceeding.
i := t.i
last := len(timers.t) - 1
if i < 0 || i > last || timers.t[i] != t {
unlock(&timers.lock)
return false
}
if i != last {
timers.t[i] = timers.t[last]
timers.t[i].i = i
}
timers.t[last] = nil
timers.t = timers.t[:last]
if i != last {
siftupTimer(i)
siftdownTimer(i)
}
unlock(&timers.lock)
return true
}
// Timerproc runs the time-driven events.
// It sleeps until the next event in the timers heap.
// If addtimer inserts a new earlier event, addtimer1 wakes timerproc early.
func timerproc() {
timers.gp = getg()
for {
lock(&timers.lock)
timers.sleeping = false
now := nanotime()
delta := int64(-1)
for {
if len(timers.t) == 0 {
delta = -1
break
}
t := timers.t[0]
delta = t.when - now
if delta > 0 {
break
}
if t.period > 0 {
// leave in heap but adjust next time to fire
t.when += t.period * (1 + -delta/t.period)
siftdownTimer(0)
} else {
// remove from heap
last := len(timers.t) - 1
if last > 0 {
timers.t[0] = timers.t[last]
timers.t[0].i = 0
}
timers.t[last] = nil
timers.t = timers.t[:last]
if last > 0 {
siftdownTimer(0)
}
t.i = -1 // mark as removed
}
f := t.f
arg := t.arg
seq := t.seq
unlock(&timers.lock)
if raceenabled {
raceacquire(unsafe.Pointer(t))
}
f(arg, seq)
lock(&timers.lock)
}
if delta < 0 || faketime > 0 {
// No timers left - put goroutine to sleep.
timers.rescheduling = true
goparkunlock(&timers.lock, "timer goroutine (idle)", traceEvGoBlock, 1)
continue
}
// At least one timer pending. Sleep until then.
timers.sleeping = true
noteclear(&timers.waitnote)
unlock(&timers.lock)
notetsleepg(&timers.waitnote, delta)
}
}
func timejump() *g {
if faketime == 0 {
return nil
}
lock(&timers.lock)
if !timers.created || len(timers.t) == 0 {
unlock(&timers.lock)
return nil
}
var gp *g
if faketime < timers.t[0].when {
faketime = timers.t[0].when
if timers.rescheduling {
timers.rescheduling = false
gp = timers.gp
}
}
unlock(&timers.lock)
return gp
}
// Heap maintenance algorithms.
func siftupTimer(i int) {
t := timers.t
when := t[i].when
tmp := t[i]
for i > 0 {
p := (i - 1) / 4 // parent
if when >= t[p].when {
break
}
t[i] = t[p]
t[i].i = i
t[p] = tmp
t[p].i = p
i = p
}
}
func siftdownTimer(i int) {
t := timers.t
n := len(t)
when := t[i].when
tmp := t[i]
for {
c := i*4 + 1 // left child
c3 := c + 2 // mid child
if c >= n {
break
}
w := t[c].when
if c+1 < n && t[c+1].when < w {
w = t[c+1].when
c++
}
if c3 < n {
w3 := t[c3].when
if c3+1 < n && t[c3+1].when < w3 {
w3 = t[c3+1].when
c3++
}
if w3 < w {
w = w3
c = c3
}
}
if w >= when {
break
}
t[i] = t[c]
t[i].i = i
t[c] = tmp
t[c].i = c
i = c
}
}
// Entry points for net, time to call nanotime.
//go:linkname net_runtimeNano net.runtimeNano
func net_runtimeNano() int64 {
return nanotime()
}
//go:linkname time_runtimeNano time.runtimeNano
func time_runtimeNano() int64 {
return nanotime()
}
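
A detail of the heap code above that is easy to miss: the timer heap is 4-ary, not binary. The parent of index i is (i-1)/4 and the children of i start at i*4+1, which is exactly the arithmetic siftupTimer and siftdownTimer walk. A standalone sketch of that index math (hypothetical helper names, not part of the runtime):

package main

import "fmt"

// parent and firstChild mirror siftupTimer's (i-1)/4 and
// siftdownTimer's i*4+1; the names are ours.
func parent(i int) int     { return (i - 1) / 4 }
func firstChild(i int) int { return i*4 + 1 }

func main() {
	// Index 3 owns children 13..16, and each child points back to 3.
	for c := firstChild(3); c < firstChild(3)+4; c++ {
		fmt.Println(c, parent(c) == 3) // prints 13 true ... 16 true
	}
}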

libgo/runtime/go-nanotime.c

@@ -14,8 +14,8 @@ int64 runtime_nanotime (void)
 int64
 runtime_nanotime (void)
 {
-  struct timeval tv;
+  struct timespec ts;
 
-  gettimeofday (&tv, NULL);
-  return (int64) tv.tv_sec * 1000000000 + (int64) tv.tv_usec * 1000;
+  clock_gettime (CLOCK_MONOTONIC, &ts);
+  return (int64) ts.tv_sec * 1000000000 + (int64) ts.tv_nsec;
 }
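
For comparison, the same clock is reachable from ordinary Go code. A minimal sketch, assuming Linux and the golang.org/x/sys/unix package (not used by libgo itself); intervals measured this way are immune to wall-clock adjustments, which is the point of switching away from gettimeofday:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// nanotime mirrors runtime_nanotime above: CLOCK_MONOTONIC has nanosecond
// resolution and never jumps backward when the wall clock is reset.
func nanotime() int64 {
	var ts unix.Timespec
	if err := unix.ClockGettime(unix.CLOCK_MONOTONIC, &ts); err != nil {
		panic(err)
	}
	return int64(ts.Sec)*1e9 + int64(ts.Nsec)
}

func main() {
	start := nanotime()
	// ... work to be timed ...
	fmt.Println("elapsed ns:", nanotime()-start)
}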

libgo/runtime/malloc.h

@@ -543,5 +543,4 @@ int32 runtime_setgcpercent(int32);
 
 struct Workbuf;
 void runtime_proc_scan(struct Workbuf**, void (*)(struct Workbuf**, Obj));
-void runtime_time_scan(struct Workbuf**, void (*)(struct Workbuf**, Obj));
 void runtime_netpoll_scan(struct Workbuf**, void (*)(struct Workbuf**, Obj));

libgo/runtime/mgc0.c

@@ -1277,7 +1277,6 @@ markroot(ParFor *desc, uint32 i)
 		enqueue1(&wbuf, (Obj){(byte*)&runtime_allp, sizeof runtime_allp, 0});
 		enqueue1(&wbuf, (Obj){(byte*)&work, sizeof work, 0});
 		runtime_proc_scan(&wbuf, enqueue1);
-		runtime_time_scan(&wbuf, enqueue1);
 		runtime_netpoll_scan(&wbuf, enqueue1);
 		break;

libgo/runtime/netpoll.goc

@@ -89,11 +89,6 @@ static FuncVal deadlineFn = {(void(*)(void))deadline};
 static FuncVal readDeadlineFn = {(void(*)(void))readDeadline};
 static FuncVal writeDeadlineFn = {(void(*)(void))writeDeadline};
 
-// runtimeNano returns the current value of the runtime clock in nanoseconds.
-func runtimeNano() (ns int64) {
-	ns = runtime_nanotime();
-}
-
 func runtime_pollServerInit() {
 	runtime_netpollinit();
 }
@@ -176,13 +171,13 @@ func runtime_pollSetDeadline(pd *PollDesc, d int64, mode int) {
 	}
 	pd->seq++; // invalidate current timers
 	// Reset current timers.
-	if(pd->rt.fv) {
+	if(pd->rt.f) {
 		runtime_deltimer(&pd->rt);
-		pd->rt.fv = nil;
+		pd->rt.f = nil;
 	}
-	if(pd->wt.fv) {
+	if(pd->wt.f) {
 		runtime_deltimer(&pd->wt);
-		pd->wt.fv = nil;
+		pd->wt.f = nil;
 	}
 	// Setup new timers.
 	if(d != 0 && d <= runtime_nanotime())
@@ -192,7 +187,7 @@ func runtime_pollSetDeadline(pd *PollDesc, d int64, mode int) {
 	if(mode == 'w' || mode == 'r'+'w')
 		pd->wd = d;
 	if(pd->rd > 0 && pd->rd == pd->wd) {
-		pd->rt.fv = &deadlineFn;
+		pd->rt.f = &deadlineFn;
 		pd->rt.when = pd->rd;
 		// Copy current seq into the timer arg.
 		// Timer func will check the seq against current descriptor seq,
@@ -203,7 +198,7 @@ func runtime_pollSetDeadline(pd *PollDesc, d int64, mode int) {
 		runtime_addtimer(&pd->rt);
 	} else {
 		if(pd->rd > 0) {
-			pd->rt.fv = &readDeadlineFn;
+			pd->rt.f = &readDeadlineFn;
 			pd->rt.when = pd->rd;
 			pd->rt.arg.type = nil; // should be *pollDesc type descriptor.
 			pd->rt.arg.data = pd;
@@ -211,7 +206,7 @@ func runtime_pollSetDeadline(pd *PollDesc, d int64, mode int) {
 			runtime_addtimer(&pd->rt);
 		}
 		if(pd->wd > 0) {
-			pd->wt.fv = &writeDeadlineFn;
+			pd->wt.f = &writeDeadlineFn;
 			pd->wt.when = pd->wd;
 			pd->wt.arg.type = nil; // should be *pollDesc type descriptor.
 			pd->wt.arg.data = pd;
@@ -244,13 +239,13 @@ func runtime_pollUnblock(pd *PollDesc) {
 	runtime_atomicstorep(&rg, nil); // full memory barrier between store to closing and read of rg/wg in netpollunblock
 	rg = netpollunblock(pd, 'r', false);
 	wg = netpollunblock(pd, 'w', false);
-	if(pd->rt.fv) {
+	if(pd->rt.f) {
 		runtime_deltimer(&pd->rt);
-		pd->rt.fv = nil;
+		pd->rt.f = nil;
 	}
-	if(pd->wt.fv) {
+	if(pd->wt.f) {
 		runtime_deltimer(&pd->wt);
-		pd->wt.fv = nil;
+		pd->wt.f = nil;
 	}
 	runtime_unlock(pd);
 	if(rg)
@@ -408,17 +403,17 @@ deadlineimpl(Eface arg, uintptr seq, bool read, bool write)
 		return;
 	}
 	if(read) {
-		if(pd->rd <= 0 || pd->rt.fv == nil)
+		if(pd->rd <= 0 || pd->rt.f == nil)
 			runtime_throw("deadlineimpl: inconsistent read deadline");
 		pd->rd = -1;
-		runtime_atomicstorep(&pd->rt.fv, nil); // full memory barrier between store to rd and load of rg in netpollunblock
+		runtime_atomicstorep(&pd->rt.f, nil); // full memory barrier between store to rd and load of rg in netpollunblock
 		rg = netpollunblock(pd, 'r', false);
 	}
 	if(write) {
-		if(pd->wd <= 0 || (pd->wt.fv == nil && !read))
+		if(pd->wd <= 0 || (pd->wt.f == nil && !read))
 			runtime_throw("deadlineimpl: inconsistent write deadline");
 		pd->wd = -1;
-		runtime_atomicstorep(&pd->wt.fv, nil); // full memory barrier between store to wd and load of wg in netpollunblock
+		runtime_atomicstorep(&pd->wt.f, nil); // full memory barrier between store to wd and load of wg in netpollunblock
 		wg = netpollunblock(pd, 'w', false);
 	}
 	runtime_unlock(pd);
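
At the user level, these rt/wt timers are what a deadline on a network connection arms: net.Conn's SetDeadline reaches runtime_pollSetDeadline above via the netpoll descriptor. A short usage sketch (plain standard-library Go; the host and timeout are arbitrary):

package main

import (
	"log"
	"net"
	"time"
)

func main() {
	conn, err := net.Dial("tcp", "example.com:80")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	// Arms both the read and write deadline timers (pd->rt and pd->wt).
	conn.SetDeadline(time.Now().Add(5 * time.Second))
	if _, err := conn.Read(make([]byte, 1)); err != nil {
		log.Print(err) // e.g. "i/o timeout" once the deadline timer fires
	}
}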

libgo/runtime/runtime.h

@@ -66,8 +66,7 @@ typedef struct SigTab SigTab;
 typedef struct mcache MCache;
 typedef struct FixAlloc FixAlloc;
 typedef struct hchan Hchan;
-typedef struct Timers Timers;
-typedef struct Timer Timer;
+typedef struct timer Timer;
 typedef struct gcstats GCStats;
 typedef struct LFNode LFNode;
 typedef struct ParFor ParFor;
@@ -181,36 +180,6 @@ enum {
 };
 #endif
 
-struct Timers
-{
-	Lock;
-	G *timerproc;
-	bool sleeping;
-	bool rescheduling;
-	Note waitnote;
-	Timer **t;
-	int32 len;
-	int32 cap;
-};
-
-// Package time knows the layout of this structure.
-// If this struct changes, adjust ../time/sleep.go:/runtimeTimer.
-// For GOOS=nacl, package syscall knows the layout of this structure.
-// If this struct changes, adjust ../syscall/net_nacl.go:/runtimeTimer.
-struct Timer
-{
-	intgo i; // heap index
-
-	// Timer wakes up at when, and then at when+period, ... (period > 0 only)
-	// each time calling f(now, arg) in the timer goroutine, so f must be
-	// a well-behaved function and not block.
-	int64 when;
-	int64 period;
-	FuncVal *fv;
-	Eface arg;
-	uintptr seq;
-};
-
 // Lock-free stack node.
 struct LFNode
 {
@@ -403,7 +372,8 @@ bool __go_sigsend(int32 sig);
 int32 runtime_callers(int32, Location*, int32, bool keep_callers);
 int64 runtime_nanotime(void) // monotonic time
 	__asm__(GOSYM_PREFIX "runtime.nanotime");
-int64 runtime_unixnanotime(void); // real time, can skip
+int64 runtime_unixnanotime(void) // real time, can skip
+	__asm__ (GOSYM_PREFIX "runtime.unixnanotime");
 void runtime_dopanic(int32) __attribute__ ((noreturn));
 void runtime_startpanic(void);
 void runtime_freezetheworld(void);
@@ -422,8 +392,10 @@ int64 runtime_tickspersecond(void)
 	__asm__ (GOSYM_PREFIX "runtime.tickspersecond");
 void runtime_blockevent(int64, int32);
 extern int64 runtime_blockprofilerate;
-void runtime_addtimer(Timer*);
-bool runtime_deltimer(Timer*);
+void runtime_addtimer(Timer*)
+	__asm__ (GOSYM_PREFIX "runtime.addtimer");
+bool runtime_deltimer(Timer*)
+	__asm__ (GOSYM_PREFIX "runtime.deltimer");
 G* runtime_netpoll(bool);
 void runtime_netpollinit(void);
 int32 runtime_netpollopen(uintptr, PollDesc*);

libgo/runtime/time.goc (deleted file)

@@ -1,353 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Time-related runtime and pieces of package time.
package time
#include <sys/time.h>
#include "runtime.h"
#include "defs.h"
#include "arch.h"
#include "malloc.h"
enum {
debug = 0,
};
static Timers timers;
static void addtimer(Timer*);
static void dumptimers(const char*);
// nacl fake time support.
int64 runtime_timens;
// Package time APIs.
// Godoc uses the comments in package time, not these.
// time.now is implemented in assembly.
// runtimeNano returns the current value of the runtime clock in nanoseconds.
func runtimeNano() (ns int64) {
ns = runtime_nanotime();
}
// Sleep puts the current goroutine to sleep for at least ns nanoseconds.
func Sleep(ns int64) {
runtime_tsleep(ns, "sleep");
}
// startTimer adds t to the timer heap.
func startTimer(t *Timer) {
runtime_addtimer(t);
}
// stopTimer removes t from the timer heap if it is there.
// It returns true if t was removed, false if t wasn't even there.
func stopTimer(t *Timer) (stopped bool) {
stopped = runtime_deltimer(t);
}
// C runtime.
int64 runtime_unixnanotime(void)
{
struct time_now_ret r;
r = now();
return r.sec*1000000000 + r.nsec;
}
static void timerproc(void*);
static void siftup(int32);
static void siftdown(int32);
// Ready the goroutine e.data.
static void
ready(Eface e, uintptr seq)
{
USED(seq);
runtime_ready(e.__object);
}
static FuncVal readyv = {(void(*)(void))ready};
// Put the current goroutine to sleep for ns nanoseconds.
void
runtime_tsleep(int64 ns, const char *reason)
{
G* g;
Timer t;
g = runtime_g();
if(ns <= 0)
return;
t.when = runtime_nanotime() + ns;
t.period = 0;
t.fv = &readyv;
t.arg.__object = g;
t.seq = 0;
runtime_lock(&timers);
addtimer(&t);
runtime_parkunlock(&timers, reason);
}
void
runtime_addtimer(Timer *t)
{
runtime_lock(&timers);
addtimer(t);
runtime_unlock(&timers);
}
// Add a timer to the heap and start or kick the timer proc
// if the new timer is earlier than any of the others.
static void
addtimer(Timer *t)
{
int32 n;
Timer **nt;
// when must never be negative; otherwise timerproc will overflow
// during its delta calculation and never expire other timers.
if(t->when < 0)
t->when = (int64)((1ULL<<63)-1);
if(timers.len >= timers.cap) {
// Grow slice.
n = 16;
if(n <= timers.cap)
n = timers.cap*3 / 2;
nt = runtime_malloc(n*sizeof nt[0]);
runtime_memmove(nt, timers.t, timers.len*sizeof nt[0]);
runtime_free(timers.t);
timers.t = nt;
timers.cap = n;
}
t->i = timers.len++;
timers.t[t->i] = t;
siftup(t->i);
if(t->i == 0) {
// siftup moved to top: new earliest deadline.
if(timers.sleeping) {
timers.sleeping = false;
runtime_notewakeup(&timers.waitnote);
}
if(timers.rescheduling) {
timers.rescheduling = false;
runtime_ready(timers.timerproc);
}
}
if(timers.timerproc == nil) {
timers.timerproc = __go_go(timerproc, nil);
timers.timerproc->issystem = true;
}
if(debug)
dumptimers("addtimer");
}
// Used to force a dereference before the lock is acquired.
static int32 gi;
// Delete timer t from the heap.
// Do not need to update the timerproc:
// if it wakes up early, no big deal.
bool
runtime_deltimer(Timer *t)
{
int32 i;
// Dereference t so that any panic happens before the lock is held.
// Discard result, because t might be moving in the heap.
i = t->i;
gi = i;
runtime_lock(&timers);
// t may not be registered anymore and may have
// a bogus i (typically 0, if generated by Go).
// Verify it before proceeding.
i = t->i;
if(i < 0 || i >= timers.len || timers.t[i] != t) {
runtime_unlock(&timers);
return false;
}
timers.len--;
if(i == timers.len) {
timers.t[i] = nil;
} else {
timers.t[i] = timers.t[timers.len];
timers.t[timers.len] = nil;
timers.t[i]->i = i;
siftup(i);
siftdown(i);
}
if(debug)
dumptimers("deltimer");
runtime_unlock(&timers);
return true;
}
// Timerproc runs the time-driven events.
// It sleeps until the next event in the timers heap.
// If addtimer inserts a new earlier event, addtimer
// wakes timerproc early.
static void
timerproc(void* dummy __attribute__ ((unused)))
{
int64 delta, now;
Timer *t;
FuncVal *fv;
void (*f)(Eface, uintptr);
Eface arg;
uintptr seq;
for(;;) {
runtime_lock(&timers);
timers.sleeping = false;
now = runtime_nanotime();
for(;;) {
if(timers.len == 0) {
delta = -1;
break;
}
t = timers.t[0];
delta = t->when - now;
if(delta > 0)
break;
if(t->period > 0) {
// leave in heap but adjust next time to fire
t->when += t->period * (1 + -delta/t->period);
siftdown(0);
} else {
// remove from heap
timers.t[0] = timers.t[--timers.len];
timers.t[0]->i = 0;
siftdown(0);
t->i = -1; // mark as removed
}
fv = t->fv;
f = (void*)t->fv->fn;
arg = t->arg;
seq = t->seq;
runtime_unlock(&timers);
__builtin_call_with_static_chain(f(arg, seq), fv);
// clear f and arg to avoid leak while sleeping for next timer
f = nil;
USED(f);
arg.__type_descriptor = nil;
arg.__object = nil;
USED(&arg);
runtime_lock(&timers);
}
if(delta < 0) {
// No timers left - put goroutine to sleep.
timers.rescheduling = true;
runtime_g()->isbackground = true;
runtime_parkunlock(&timers, "timer goroutine (idle)");
runtime_g()->isbackground = false;
continue;
}
// At least one timer pending. Sleep until then.
timers.sleeping = true;
runtime_noteclear(&timers.waitnote);
runtime_unlock(&timers);
runtime_notetsleepg(&timers.waitnote, delta);
}
}
// heap maintenance algorithms.
static void
siftup(int32 i)
{
int32 p;
int64 when;
Timer **t, *tmp;
t = timers.t;
when = t[i]->when;
tmp = t[i];
while(i > 0) {
p = (i-1)/4; // parent
if(when >= t[p]->when)
break;
t[i] = t[p];
t[i]->i = i;
t[p] = tmp;
tmp->i = p;
i = p;
}
}
static void
siftdown(int32 i)
{
int32 c, c3, len;
int64 when, w, w3;
Timer **t, *tmp;
t = timers.t;
len = timers.len;
when = t[i]->when;
tmp = t[i];
for(;;) {
c = i*4 + 1; // left child
c3 = c + 2; // mid child
if(c >= len) {
break;
}
w = t[c]->when;
if(c+1 < len && t[c+1]->when < w) {
w = t[c+1]->when;
c++;
}
if(c3 < len) {
w3 = t[c3]->when;
if(c3+1 < len && t[c3+1]->when < w3) {
w3 = t[c3+1]->when;
c3++;
}
if(w3 < w) {
w = w3;
c = c3;
}
}
if(w >= when)
break;
t[i] = t[c];
t[i]->i = i;
t[c] = tmp;
tmp->i = c;
i = c;
}
}
static void
dumptimers(const char *msg)
{
Timer *t;
int32 i;
runtime_printf("timers: %s\n", msg);
for(i = 0; i < timers.len; i++) {
t = timers.t[i];
runtime_printf("\t%d\t%p:\ti %d when %D period %D fn %p\n",
i, t, t->i, t->when, t->period, t->fv->fn);
}
runtime_printf("\n");
}
void
runtime_time_scan(struct Workbuf** wbufp, void (*enqueue1)(struct Workbuf**, Obj))
{
enqueue1(wbufp, (Obj){(byte*)&timers, sizeof timers, 0});
}