libgo: update to Go 1.8 release candidate 1

Compiler changes:
      * Change map assignment to use mapassign and assign value directly.
      * Change string iteration to use decoderune, faster for ASCII strings.
      * Change makeslice to take int, and use makeslice64 for larger values.
      * Add new noverflow field to hmap struct used for maps.
    
    Unresolved problems, to be fixed later:
      * Commented out test in go/types/sizes_test.go that doesn't compile.
      * Commented out reflect.TestStructOf test for padding after zero-sized field.
    
    Reviewed-on: https://go-review.googlesource.com/35231
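
For illustration, the string-iteration change means a loop like "for i, c := range s" is now lowered with an ASCII fast path, and only multi-byte sequences go through rune decoding. runtime.decoderune itself is internal, so this hedged sketch stands it in with utf8.DecodeRuneInString; it shows the shape of the lowering, not the generated code.

package main

import (
	"fmt"
	"unicode/utf8"
)

func main() {
	s := "héllo"
	for i := 0; i < len(s); {
		var r rune
		var next int
		if c := s[i]; c < utf8.RuneSelf {
			// Fast path: an ASCII byte is its own rune.
			r, next = rune(c), i+1
		} else {
			// Slow path: decode a multi-byte UTF-8 sequence
			// (the role played by runtime.decoderune).
			var size int
			r, size = utf8.DecodeRuneInString(s[i:])
			next = i + size
		}
		fmt.Printf("index %d: %q\n", i, r)
		i = next
	}
}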

gotools/:
	Updates for Go 1.8rc1.
	* Makefile.am (go_cmd_go_files): Add bug.go.
	(s-zdefaultcc): Write defaultPkgConfig.
	* Makefile.in: Rebuild.

From-SVN: r244456
Author:    Ian Lance Taylor
Committer: Ian Lance Taylor
Date:      2017-01-14 00:05:42 +00:00
Commit:    c2047754c3
Parent:    829afb8f05

983 changed files with 69318 additions and 17662 deletions


@ -1,4 +1,4 @@
-153f7b68c0c4d3cf3da0becf82eb1a3eb8b47d6e
+0ba4563a4b0dec4c01b90d7b3c9e2ce2cd58a96f
 The first line of this file holds the git revision number of the last
 merge done from the gofrontend repository.


@ -297,6 +297,7 @@ Node::op_format() const
 case Runtime::MAKECHAN:
 case Runtime::MAKEMAP:
 case Runtime::MAKESLICE:
+case Runtime::MAKESLICE64:
 op << "make";
 break;
@ -418,7 +419,8 @@ Node::is_big(Escape_context* context) const
 Func_expression* fn = call->fn()->func_expression();
 if (fn != NULL
 && fn->is_runtime_function()
-&& fn->runtime_code() == Runtime::MAKESLICE)
+&& (fn->runtime_code() == Runtime::MAKESLICE
+|| fn->runtime_code() == Runtime::MAKESLICE64))
 {
 // Second argument is length.
 Expression_list::iterator p = call->args()->begin();
@ -1240,6 +1242,7 @@ Escape_analysis_assign::expression(Expression** pexpr)
 case Runtime::MAKECHAN:
 case Runtime::MAKEMAP:
 case Runtime::MAKESLICE:
+case Runtime::MAKESLICE64:
 case Runtime::SLICEBYTETOSTRING:
 case Runtime::SLICERUNETOSTRING:
 case Runtime::STRINGTOSLICEBYTE:
@ -1849,6 +1852,7 @@ Escape_analysis_assign::assign(Node* dst, Node* src)
 case Runtime::MAKECHAN:
 case Runtime::MAKEMAP:
 case Runtime::MAKESLICE:
+case Runtime::MAKESLICE64:
 // DST = make(...).
 case Runtime::SLICEBYTETOSTRING:
 // DST = string([]byte{...}).
@ -2623,6 +2627,7 @@ Escape_analysis_flood::flood(Level level, Node* dst, Node* src,
 case Runtime::MAKECHAN:
 case Runtime::MAKEMAP:
 case Runtime::MAKESLICE:
+case Runtime::MAKESLICE64:
 case Runtime::SLICEBYTETOSTRING:
 case Runtime::SLICERUNETOSTRING:
 case Runtime::STRINGTOSLICEBYTE:


@ -7091,7 +7091,7 @@ class Builtin_call_expression : public Call_expression
 Expression* flatten_append(Gogo*, Named_object*, Statement_inserter*);
 bool
-check_int_value(Expression*, bool is_length);
+check_int_value(Expression*, bool is_length, bool* small);
 // A pointer back to the general IR structure. This avoids a global
 // variable, or passing it around everywhere.
@ -7462,6 +7462,7 @@ Builtin_call_expression::lower_make(Statement_inserter* inserter)
 ++parg;
 Expression* len_arg;
+bool len_small = false;
 if (parg == args->end())
 {
 if (is_slice)
@ -7475,17 +7476,18 @@ Builtin_call_expression::lower_make(Statement_inserter* inserter)
 {
 len_arg = *parg;
 len_arg->determine_type(&int_context);
-if (!this->check_int_value(len_arg, true))
+if (!this->check_int_value(len_arg, true, &len_small))
 return Expression::make_error(this->location());
 ++parg;
 }
 Expression* cap_arg = NULL;
+bool cap_small = false;
 if (is_slice && parg != args->end())
 {
 cap_arg = *parg;
 cap_arg->determine_type(&int_context);
-if (!this->check_int_value(cap_arg, false))
+if (!this->check_int_value(cap_arg, false, &cap_small))
 return Expression::make_error(this->location());
 Numeric_constant nclen;
@ -7526,9 +7528,13 @@ Builtin_call_expression::lower_make(Statement_inserter* inserter)
 inserter->insert(temp);
 len_arg = Expression::make_temporary_reference(temp, loc);
 cap_arg = Expression::make_temporary_reference(temp, loc);
+cap_small = len_small;
 }
-call = Runtime::make_call(Runtime::MAKESLICE, loc, 3, type_arg,
-len_arg, cap_arg);
+Runtime::Function code = Runtime::MAKESLICE;
+if (!len_small || !cap_small)
+code = Runtime::MAKESLICE64;
+call = Runtime::make_call(code, loc, 3, type_arg, len_arg, cap_arg);
 }
 else if (is_map)
 {
@ -7744,11 +7750,14 @@ Builtin_call_expression::flatten_append(Gogo* gogo, Named_object* function,
 // Return whether an expression has an integer value. Report an error
 // if not. This is used when handling calls to the predeclared make
-// function.
+// function. Set *SMALL if the value is known to fit in type "int".
 bool
-Builtin_call_expression::check_int_value(Expression* e, bool is_length)
+Builtin_call_expression::check_int_value(Expression* e, bool is_length,
+bool *small)
 {
+*small = false;
 Numeric_constant nc;
 if (e->numeric_constant_value(&nc))
 {
@ -7784,11 +7793,22 @@ Builtin_call_expression::check_int_value(Expression* e, bool is_length)
 return false;
 }
+*small = true;
 return true;
 }
 if (e->type()->integer_type() != NULL)
-return true;
+{
+int ebits = e->type()->integer_type()->bits();
+int intbits = Type::lookup_integer_type("int")->integer_type()->bits();
+// We can treat ebits == intbits as small even for an unsigned
+// integer type, because we will convert the value to int and
+// then reject it in the runtime if it is negative.
+*small = ebits <= intbits;
+return true;
+}
 go_error_at(e->location(), "non-integer %s argument to make",
 is_length ? "len" : "cap");
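
The user-visible rule this check preserves: a constant make length that cannot fit in int is rejected at compile time, while a non-constant value is converted to int and rejected by the runtime (which is why an equal-width unsigned type can still be treated as "small"). A minimal hedged sketch of the runtime-side rejection, using an ordinary negative length:

package main

import "fmt"

func main() {
	// make([]byte, -1) with a constant is a compile-time error; with a
	// variable the check moves into the runtime makeslice path.
	n := -1
	defer func() { fmt.Println("recovered:", recover()) }()
	_ = make([]byte, n) // panics with a "len out of range" runtime error
}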


@ -31,12 +31,9 @@
 // The standard C memcmp function, used for struct comparisons.
 DEF_GO_RUNTIME(MEMCMP, "__go_memcmp", P3(POINTER, POINTER, UINTPTR), R1(INT))
-// Range over a string, returning the next index.
-DEF_GO_RUNTIME(STRINGITER, "runtime.stringiter", P2(STRING, INT), R1(INT))
-// Range over a string, returning the next index and character.
-DEF_GO_RUNTIME(STRINGITER2, "runtime.stringiter2", P2(STRING, INT),
-R2(INT, RUNE))
+// Decode a non-ASCII rune from a string.
+DEF_GO_RUNTIME(DECODERUNE, "runtime.decoderune", P2(STRING, INT),
+R2(RUNE, INT))
 // Concatenate strings.
 DEF_GO_RUNTIME(CONCATSTRINGS, "runtime.concatstrings", P2(POINTER, SLICE),
@ -87,7 +84,10 @@ DEF_GO_RUNTIME(COMPLEX128_DIV, "__go_complex128_div",
 P2(COMPLEX128, COMPLEX128), R1(COMPLEX128))
 // Make a slice.
-DEF_GO_RUNTIME(MAKESLICE, "runtime.makeslice", P3(TYPE, INT64, INT64),
+DEF_GO_RUNTIME(MAKESLICE, "runtime.makeslice", P3(TYPE, INT, INT),
+R1(SLICE))
+DEF_GO_RUNTIME(MAKESLICE64, "runtime.makeslice64", P3(TYPE, INT64, INT64),
 R1(SLICE))
@ -119,8 +119,8 @@ DEF_GO_RUNTIME(MAPACCESS2_FAT, "runtime.mapaccess2_fat",
 P4(TYPE, MAP, POINTER, POINTER), R2(POINTER, BOOL))
 // Assignment to a key in a map.
-DEF_GO_RUNTIME(MAPASSIGN, "runtime.mapassign1",
-P4(TYPE, MAP, POINTER, POINTER), R0())
+DEF_GO_RUNTIME(MAPASSIGN, "runtime.mapassign", P3(TYPE, MAP, POINTER),
+R1(POINTER))
 // Delete a key from a map.
 DEF_GO_RUNTIME(MAPDELETE, "runtime.mapdelete", P3(TYPE, MAP, POINTER), R0())


@ -707,8 +707,8 @@ Assignment_statement::do_lower(Gogo*, Named_object*, Block* enclosing,
 Move_ordered_evals moe(b);
 mie->traverse_subexpressions(&moe);
-// Copy key and value into temporaries so that we can take their
-// address without pushing the value onto the heap.
+// Copy the key into a temporary so that we can take its address
+// without pushing the value onto the heap.
 // var key_temp KEY_TYPE = MAP_INDEX
 Temporary_statement* key_temp = Statement::make_temporary(mt->key_type(),
@ -716,23 +716,29 @@ Assignment_statement::do_lower(Gogo*, Named_object*, Block* enclosing,
 loc);
 b->add_statement(key_temp);
+// Copy the value into a temporary to ensure that it is
+// evaluated before we add the key to the map. This may matter
+// if the value is itself a reference to the map.
 // var val_temp VAL_TYPE = RHS
 Temporary_statement* val_temp = Statement::make_temporary(mt->val_type(),
 this->rhs_,
 loc);
 b->add_statement(val_temp);
-// mapassign1(TYPE, MAP, &key_temp, &val_temp)
+// *mapassign(TYPE, MAP, &key_temp) = RHS
 Expression* a1 = Expression::make_type_descriptor(mt, loc);
 Expression* a2 = mie->map();
 Temporary_reference_expression* ref =
 Expression::make_temporary_reference(key_temp, loc);
 Expression* a3 = Expression::make_unary(OPERATOR_AND, ref, loc);
+Expression* call = Runtime::make_call(Runtime::MAPASSIGN, loc, 3,
+a1, a2, a3);
+Type* ptrval_type = Type::make_pointer_type(mt->val_type());
+call = Expression::make_cast(ptrval_type, call, loc);
+Expression* indir = Expression::make_unary(OPERATOR_MULT, call, loc);
 ref = Expression::make_temporary_reference(val_temp, loc);
-Expression* a4 = Expression::make_unary(OPERATOR_AND, ref, loc);
-Expression* call = Runtime::make_call(Runtime::MAPASSIGN, loc, 4,
-a1, a2, a3, a4);
-b->add_statement(Statement::make_statement(call, false));
+b->add_statement(Statement::make_assignment(indir, ref, loc));
 return Statement::make_block_statement(b, loc);
 }
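
The lowering above turns m[k] = v into a store through the pointer returned by mapassign, roughly *mapassign(TYPE, MAP, &key_temp) = val_temp, with the value copied to a temporary first. The evaluation-order point made by the new comment can be seen from ordinary Go code; this is a hedged illustration, not the generated form:

package main

import "fmt"

func main() {
	m := map[string]int{}
	// The right-hand side is evaluated before the key is inserted, so
	// len(m) is still 0 when the assignment happens.
	m["a"] = len(m)
	fmt.Println(m["a"]) // prints 0
}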
@ -5313,7 +5319,7 @@ For_range_statement::do_lower(Gogo* gogo, Named_object*, Block* enclosing,
 else if (range_type->is_string_type())
 {
 index_type = Type::lookup_integer_type("int");
-value_type = Type::lookup_integer_type("int32");
+value_type = gogo->lookup_global("rune")->type_value();
 }
 else if (range_type->map_type() != NULL)
 {
@ -5458,7 +5464,7 @@ For_range_statement::make_range_ref(Named_object* range_object,
 // Return a call to the predeclared function FUNCNAME passing a
 // reference to the temporary variable ARG.
-Expression*
+Call_expression*
 For_range_statement::call_builtin(Gogo* gogo, const char* funcname,
 Expression* arg,
 Location loc)
@ -5664,7 +5670,7 @@ For_range_statement::lower_range_slice(Gogo* gogo,
 // Lower a for range over a string.
 void
-For_range_statement::lower_range_string(Gogo*,
+For_range_statement::lower_range_string(Gogo* gogo,
 Block* enclosing,
 Block* body_block,
 Named_object* range_object,
@ -5679,94 +5685,121 @@ For_range_statement::lower_range_string(Gogo*,
 Location loc = this->location();
 // The loop we generate:
+// len_temp := len(range)
 // var next_index_temp int
-// for index_temp = 0; ; index_temp = next_index_temp {
-// next_index_temp, value_temp = stringiter2(range, index_temp)
-// if next_index_temp == 0 {
-// break
+// for index_temp = 0; index_temp < len_temp; index_temp = next_index_temp {
+// value_temp = rune(range[index_temp])
+// if value_temp < utf8.RuneSelf {
+// next_index_temp = index_temp + 1
+// } else {
+// value_temp, next_index_temp = decoderune(range, index_temp)
 // }
 // index = index_temp
 // value = value_temp
-// original body
+// // original body
 // }
 // Set *PINIT to
+// len_temp := len(range)
 // var next_index_temp int
 // index_temp = 0
+// var value_temp rune // if value_temp not passed in
 Block* init = new Block(enclosing, loc);
+Expression* ref = this->make_range_ref(range_object, range_temp, loc);
+Call_expression* call = this->call_builtin(gogo, "len", ref, loc);
+Temporary_statement* len_temp =
+Statement::make_temporary(index_temp->type(), call, loc);
+init->add_statement(len_temp);
 Temporary_statement* next_index_temp =
 Statement::make_temporary(index_temp->type(), NULL, loc);
 init->add_statement(next_index_temp);
-Expression* zexpr = Expression::make_integer_ul(0, NULL, loc);
-Temporary_reference_expression* ref =
+Temporary_reference_expression* index_ref =
 Expression::make_temporary_reference(index_temp, loc);
-ref->set_is_lvalue();
-Statement* s = Statement::make_assignment(ref, zexpr, loc);
+index_ref->set_is_lvalue();
+Expression* zexpr = Expression::make_integer_ul(0, index_temp->type(), loc);
+Statement* s = Statement::make_assignment(index_ref, zexpr, loc);
 init->add_statement(s);
+Type* rune_type;
+if (value_temp != NULL)
+rune_type = value_temp->type();
+else
+{
+rune_type = gogo->lookup_global("rune")->type_value();
+value_temp = Statement::make_temporary(rune_type, NULL, loc);
+init->add_statement(value_temp);
+}
 *pinit = init;
-// The loop has no condition.
-*pcond = NULL;
+// Set *PCOND to
+// index_temp < len_temp
+index_ref = Expression::make_temporary_reference(index_temp, loc);
+Expression* len_ref =
+Expression::make_temporary_reference(len_temp, loc);
+*pcond = Expression::make_binary(OPERATOR_LT, index_ref, len_ref, loc);
 // Set *PITER_INIT to
-// next_index_temp = runtime.stringiter(range, index_temp)
-// or
-// next_index_temp, value_temp = runtime.stringiter2(range, index_temp)
-// followed by
-// if next_index_temp == 0 {
-// break
+// value_temp = rune(range[index_temp])
+// if value_temp < utf8.RuneSelf {
+// next_index_temp = index_temp + 1
+// } else {
+// value_temp, next_index_temp = decoderune(range, index_temp)
 // }
 Block* iter_init = new Block(body_block, loc);
-Expression* p1 = this->make_range_ref(range_object, range_temp, loc);
-Expression* p2 = Expression::make_temporary_reference(index_temp, loc);
-Call_expression* call = Runtime::make_call((value_temp == NULL
-? Runtime::STRINGITER
-: Runtime::STRINGITER2),
-loc, 2, p1, p2);
-if (value_temp == NULL)
-{
-ref = Expression::make_temporary_reference(next_index_temp, loc);
-ref->set_is_lvalue();
-s = Statement::make_assignment(ref, call, loc);
-}
-else
-{
-Expression_list* lhs = new Expression_list();
-ref = Expression::make_temporary_reference(next_index_temp, loc);
-ref->set_is_lvalue();
-lhs->push_back(ref);
-ref = Expression::make_temporary_reference(value_temp, loc);
-ref->set_is_lvalue();
-lhs->push_back(ref);
-Expression_list* rhs = new Expression_list();
-rhs->push_back(Expression::make_call_result(call, 0));
-rhs->push_back(Expression::make_call_result(call, 1));
-s = Statement::make_tuple_assignment(lhs, rhs, loc);
-}
+ref = this->make_range_ref(range_object, range_temp, loc);
+index_ref = Expression::make_temporary_reference(index_temp, loc);
+ref = Expression::make_string_index(ref, index_ref, NULL, loc);
+ref = Expression::make_cast(rune_type, ref, loc);
+Temporary_reference_expression* value_ref =
+Expression::make_temporary_reference(value_temp, loc);
+value_ref->set_is_lvalue();
+s = Statement::make_assignment(value_ref, ref, loc);
 iter_init->add_statement(s);
-ref = Expression::make_temporary_reference(next_index_temp, loc);
-zexpr = Expression::make_integer_ul(0, NULL, loc);
-Expression* equals = Expression::make_binary(OPERATOR_EQEQ, ref, zexpr, loc);
+value_ref = Expression::make_temporary_reference(value_temp, loc);
+Expression* rune_self = Expression::make_integer_ul(0x80, rune_type, loc);
+Expression* cond = Expression::make_binary(OPERATOR_LT, value_ref, rune_self,
+loc);
 Block* then_block = new Block(iter_init, loc);
-s = Statement::make_break_statement(this->break_label(), loc);
+Temporary_reference_expression* lhs =
+Expression::make_temporary_reference(next_index_temp, loc);
+lhs->set_is_lvalue();
+index_ref = Expression::make_temporary_reference(index_temp, loc);
+Expression* one = Expression::make_integer_ul(1, index_temp->type(), loc);
+Expression* sum = Expression::make_binary(OPERATOR_PLUS, index_ref, one,
+loc);
+s = Statement::make_assignment(lhs, sum, loc);
 then_block->add_statement(s);
-s = Statement::make_if_statement(equals, then_block, NULL, loc);
+Block* else_block = new Block(iter_init, loc);
+ref = this->make_range_ref(range_object, range_temp, loc);
+index_ref = Expression::make_temporary_reference(index_temp, loc);
+call = Runtime::make_call(Runtime::DECODERUNE, loc, 2, ref, index_ref);
+value_ref = Expression::make_temporary_reference(value_temp, loc);
+value_ref->set_is_lvalue();
+Expression* res = Expression::make_call_result(call, 0);
+s = Statement::make_assignment(value_ref, res, loc);
+else_block->add_statement(s);
+lhs = Expression::make_temporary_reference(next_index_temp, loc);
+lhs->set_is_lvalue();
+res = Expression::make_call_result(call, 1);
+s = Statement::make_assignment(lhs, res, loc);
+else_block->add_statement(s);
+s = Statement::make_if_statement(cond, then_block, else_block, loc);
 iter_init->add_statement(s);
 *piter_init = iter_init;
@ -5776,11 +5809,10 @@ For_range_statement::lower_range_string(Gogo*,
 Block* post = new Block(enclosing, loc);
-Temporary_reference_expression* lhs =
-Expression::make_temporary_reference(index_temp, loc);
-lhs->set_is_lvalue();
-Expression* rhs = Expression::make_temporary_reference(next_index_temp, loc);
-s = Statement::make_assignment(lhs, rhs, loc);
+index_ref = Expression::make_temporary_reference(index_temp, loc);
+index_ref->set_is_lvalue();
+ref = Expression::make_temporary_reference(next_index_temp, loc);
+s = Statement::make_assignment(index_ref, ref, loc);
 post->add_statement(s);
 *ppost = post;


@ -1500,7 +1500,7 @@ class For_range_statement : public Statement
 Expression*
 make_range_ref(Named_object*, Temporary_statement*, Location);
-Expression*
+Call_expression*
 call_builtin(Gogo*, const char* funcname, Expression* arg, Location);
 void


@ -7319,7 +7319,7 @@ Map_type::do_get_backend(Gogo* gogo)
 static Btype* backend_map_type;
 if (backend_map_type == NULL)
 {
-std::vector<Backend::Btyped_identifier> bfields(8);
+std::vector<Backend::Btyped_identifier> bfields(9);
 Location bloc = Linemap::predeclared_location();
@ -7337,30 +7337,35 @@ Map_type::do_get_backend(Gogo* gogo)
 bfields[2].btype = bfields[1].btype;
 bfields[2].location = bloc;
-Type* uint32_type = Type::lookup_integer_type("uint32");
-bfields[3].name = "hash0";
-bfields[3].btype = uint32_type->get_backend(gogo);
+Type* uint16_type = Type::lookup_integer_type("uint16");
+bfields[3].name = "noverflow";
+bfields[3].btype = uint16_type->get_backend(gogo);
 bfields[3].location = bloc;
+Type* uint32_type = Type::lookup_integer_type("uint32");
+bfields[4].name = "hash0";
+bfields[4].btype = uint32_type->get_backend(gogo);
+bfields[4].location = bloc;
 Btype* bvt = gogo->backend()->void_type();
 Btype* bpvt = gogo->backend()->pointer_type(bvt);
-bfields[4].name = "buckets";
-bfields[4].btype = bpvt;
-bfields[4].location = bloc;
-bfields[5].name = "oldbuckets";
+bfields[5].name = "buckets";
 bfields[5].btype = bpvt;
 bfields[5].location = bloc;
-Type* uintptr_type = Type::lookup_integer_type("uintptr");
-bfields[6].name = "nevacuate";
-bfields[6].btype = uintptr_type->get_backend(gogo);
+bfields[6].name = "oldbuckets";
+bfields[6].btype = bpvt;
 bfields[6].location = bloc;
-bfields[7].name = "overflow";
-bfields[7].btype = bpvt;
+Type* uintptr_type = Type::lookup_integer_type("uintptr");
+bfields[7].name = "nevacuate";
+bfields[7].btype = uintptr_type->get_backend(gogo);
 bfields[7].location = bloc;
+bfields[8].name = "overflow";
+bfields[8].btype = bpvt;
+bfields[8].location = bloc;
 Btype *bt = gogo->backend()->struct_type(bfields);
 bt = gogo->backend()->named_type("runtime.hmap", bt, bloc);
 backend_map_type = gogo->backend()->pointer_type(bt);
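
For reference, the Go 1.8 runtime map header that this backend struct mirrors looks roughly as follows. The diff above only names fields 3 through 8, so the first three field names (count, flags, B) are taken from the upstream runtime as an assumption, and the pointer-typed fields are shown as unsafe.Pointer, as the backend does.

package sketch

import "unsafe"

// hmap is a sketch of the runtime map header matching the nine backend
// fields built above.
type hmap struct {
	count      int            // number of live entries
	flags      uint8
	B          uint8          // log2 of the number of buckets
	noverflow  uint16         // approximate overflow-bucket count (the new field)
	hash0      uint32         // hash seed
	buckets    unsafe.Pointer // current bucket array
	oldbuckets unsafe.Pointer // previous bucket array, non-nil while growing
	nevacuate  uintptr        // evacuation progress counter
	overflow   unsafe.Pointer // overflow bucket bookkeeping
}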


@ -1,3 +1,10 @@
+2017-01-13 Ian Lance Taylor <iant@golang.org>
+
+Updates for Go 1.8rc1.
+* Makefile.am (go_cmd_go_files): Add bug.go.
+(s-zdefaultcc): Write defaultPkgConfig.
+* Makefile.in: Rebuild.
+
 2016-06-07 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE>
 Update copyrights.


@ -45,6 +45,7 @@ cmdsrcdir = $(srcdir)/../libgo/go/cmd
 go_cmd_go_files = \
 $(cmdsrcdir)/go/alldocs.go \
+$(cmdsrcdir)/go/bug.go \
 $(cmdsrcdir)/go/build.go \
 $(cmdsrcdir)/go/clean.go \
 $(cmdsrcdir)/go/context.go \
@ -99,6 +100,7 @@ s-zdefaultcc: Makefile
 echo 'const defaultGCCGO = "$(bindir)/$(GCCGO_INSTALL_NAME)"' >> zdefaultcc.go.tmp
 echo 'const defaultCC = "$(bindir)/$(GCC_INSTALL_NAME)"' >> zdefaultcc.go.tmp
 echo 'const defaultCXX = "$(bindir)/$(GXX_INSTALL_NAME)"' >> zdefaultcc.go.tmp
+echo 'const defaultPkgConfig = "pkg-config"' >> zdefaultcc.go.tmp
 $(SHELL) $(srcdir)/../move-if-change zdefaultcc.go.tmp zdefaultcc.go
 $(STAMP) $@
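
After this change the generated zdefaultcc.go carries four constants rather than three. With a default prefix it would look roughly like the sketch below; the paths depend on $(bindir) and the configured install names, so they are placeholders.

package main

const defaultGCCGO = "/usr/local/bin/gccgo"
const defaultCC = "/usr/local/bin/gcc"
const defaultCXX = "/usr/local/bin/g++"
const defaultPkgConfig = "pkg-config"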


@ -16,7 +16,7 @@
 @SET_MAKE@
 # Makefile for gotools
-# Copyright 2015 Free Software Foundation, Inc.
+# Copyright (C) 2015-2016 Free Software Foundation, Inc.
 #
 # This file is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@ -263,6 +263,7 @@ GOLINK = $(GOCOMPILER) $(GOCFLAGS) $(AM_GOCFLAGS) $(LDFLAGS) $(AM_LDFLAGS) -o $@
 cmdsrcdir = $(srcdir)/../libgo/go/cmd
 go_cmd_go_files = \
 $(cmdsrcdir)/go/alldocs.go \
+$(cmdsrcdir)/go/bug.go \
 $(cmdsrcdir)/go/build.go \
 $(cmdsrcdir)/go/clean.go \
 $(cmdsrcdir)/go/context.go \
@ -664,6 +665,7 @@ s-zdefaultcc: Makefile
 echo 'const defaultGCCGO = "$(bindir)/$(GCCGO_INSTALL_NAME)"' >> zdefaultcc.go.tmp
 echo 'const defaultCC = "$(bindir)/$(GCC_INSTALL_NAME)"' >> zdefaultcc.go.tmp
 echo 'const defaultCXX = "$(bindir)/$(GXX_INSTALL_NAME)"' >> zdefaultcc.go.tmp
+echo 'const defaultPkgConfig = "pkg-config"' >> zdefaultcc.go.tmp
 $(SHELL) $(srcdir)/../move-if-change zdefaultcc.go.tmp zdefaultcc.go
 $(STAMP) $@


@ -1,4 +1,4 @@
-f75aafdf56dd90eab75cfeac8cf69358f73ba171
+3de6e96e4b8147f5267a2e8218a7c780b09a434f
 The first line of this file holds the git revision number of the
 last merge done from the master library sources.


@ -346,7 +346,8 @@ toolexeclibgoruntimedir = $(toolexeclibgodir)/runtime
 toolexeclibgoruntime_DATA = \
 runtime/debug.gox \
-runtime/pprof.gox
+runtime/pprof.gox \
+runtime/trace.gox
 toolexeclibgosyncdir = $(toolexeclibgodir)/sync
@ -669,6 +670,7 @@ PACKAGES = \
 archive/zip \
 bufio \
 bytes \
+cmd/internal/browser \
 compress/bzip2 \
 compress/flate \
 compress/gzip \
@ -686,6 +688,7 @@ PACKAGES = \
 crypto/ecdsa \
 crypto/elliptic \
 crypto/hmac \
+crypto/internal/cipherhw \
 crypto/md5 \
 crypto/rand \
 crypto/rc4 \
@ -736,8 +739,16 @@ PACKAGES = \
 go/scanner \
 go/token \
 go/types \
+golang_org/x/crypto/chacha20poly1305 \
+golang_org/x/crypto/chacha20poly1305/internal/chacha20 \
+golang_org/x/crypto/curve25519 \
+golang_org/x/crypto/poly1305 \
 golang_org/x/net/http2/hpack \
+golang_org/x/net/idna \
 golang_org/x/net/lex/httplex \
+golang_org/x/text/transform \
+golang_org/x/text/unicode/norm \
+golang_org/x/text/width \
 hash \
 hash/adler32 \
 hash/crc32 \
@ -755,6 +766,7 @@ PACKAGES = \
 image/png \
 index/suffixarray \
 internal/nettrace \
+internal/pprof/profile \
 internal/race \
 internal/singleflight \
 internal/syscall/unix \
@ -802,6 +814,8 @@ PACKAGES = \
 runtime/internal/atomic \
 runtime/internal/sys \
 runtime/pprof \
+runtime/pprof/internal/protopprof \
+runtime/trace \
 sort \
 strconv \
 strings \
@ -809,6 +823,7 @@ PACKAGES = \
 sync/atomic \
 syscall \
 testing \
+testing/internal/testdeps \
 testing/iotest \
 testing/quick \
 text/scanner \
@ -829,6 +844,7 @@ libgo_go_objs = \
 syscall/errno.lo \
 syscall/signame.lo \
 syscall/wait.lo \
+$(golang_org_x_net_lif_lo) \
 $(golang_org_x_net_route_lo) \
 log/syslog/syslog_c.lo \
 $(os_lib_inotify_lo) \
@ -1113,7 +1129,7 @@ if LIBGO_IS_BSD
 # Build golang_org/x/net/route only on BSD systems.
-$(eval $(call PACKAGE_template,golang_org/x/net/route)
+$(eval $(call PACKAGE_template,golang_org/x/net/route))
 golang_org_x_net_route_lo = \
 golang_org/x/net/route/route.lo
@ -1122,6 +1138,19 @@ golang_org_x_net_route_check = \
 endif
+if LIBGO_IS_SOLARIS
+# Build golang_org/x/net/lif only on Solaris systems.
+$(eval $(call PACKAGE_template,golang_org/x/net/lif))
+golang_org_x_net_lif_lo = \
+golang_org/x/net/lif/lif.lo
+golang_org_x_net_lif_check = \
+golang_org/x/net/lif/check
+endif
 TEST_PACKAGES = \
 bufio/check \
 bytes/check \
@ -1209,8 +1238,14 @@ TEST_PACKAGES = \
 go/scanner/check \
 go/token/check \
 go/types/check \
+golang_org/x/crypto/chacha20poly1305/check \
+golang_org/x/crypto/chacha20poly1305/internal/chacha20/check \
+golang_org/x/crypto/curve25519/check \
+golang_org/x/crypto/poly1305/check \
 golang_org/x/net/http2/hpack/check \
+golang_org/x/net/idna/check \
 golang_org/x/net/lex/httplex/check \
+$(golang_org_x_net_lif_check) \
 $(golang_org_x_net_route_check) \
 hash/adler32/check \
 hash/crc32/check \
@ -1221,6 +1256,7 @@ TEST_PACKAGES = \
 image/jpeg/check \
 image/png/check \
 index/suffixarray/check \
+internal/pprof/profile/check \
 internal/singleflight/check \
 internal/trace/check \
 io/ioutil/check \
@ -1251,9 +1287,10 @@ TEST_PACKAGES = \
 path/filepath/check \
 regexp/syntax/check \
 runtime/debug/check \
-runtime/pprof/check \
 runtime/internal/atomic/check \
 runtime/internal/sys/check \
+runtime/pprof/check \
+runtime/pprof/internal/protopprof/check \
 sync/atomic/check \
 text/scanner/check \
 text/tabwriter/check \


@ -168,9 +168,9 @@ LTLIBRARIES = $(toolexeclib_LTLIBRARIES)
 am__DEPENDENCIES_2 = $(addsuffix .lo,$(PACKAGES)) bytes/index.lo \
 reflect/makefunc_ffi_c.lo strings/index.lo \
 $(am__DEPENDENCIES_1) syscall/errno.lo syscall/signame.lo \
-syscall/wait.lo $(golang_org_x_net_route_lo) \
-log/syslog/syslog_c.lo runtime/internal/atomic_c.lo \
-sync/atomic_c.lo
+syscall/wait.lo $(golang_org_x_net_lif_lo) \
+$(golang_org_x_net_route_lo) log/syslog/syslog_c.lo \
+runtime/internal/atomic_c.lo sync/atomic_c.lo
 am__DEPENDENCIES_3 =
 am__DEPENDENCIES_4 = $(am__DEPENDENCIES_2) \
 ../libbacktrace/libbacktrace.la $(am__DEPENDENCIES_3) \
@ -728,7 +728,8 @@ toolexeclibgoregexp_DATA = \
 toolexeclibgoruntimedir = $(toolexeclibgodir)/runtime
 toolexeclibgoruntime_DATA = \
 runtime/debug.gox \
-runtime/pprof.gox
+runtime/pprof.gox \
+runtime/trace.gox
 toolexeclibgosyncdir = $(toolexeclibgodir)/sync
 toolexeclibgosync_DATA = \
@ -834,6 +835,7 @@ PACKAGES = \
 archive/zip \
 bufio \
 bytes \
+cmd/internal/browser \
 compress/bzip2 \
 compress/flate \
 compress/gzip \
@ -851,6 +853,7 @@ PACKAGES = \
 crypto/ecdsa \
 crypto/elliptic \
 crypto/hmac \
+crypto/internal/cipherhw \
 crypto/md5 \
 crypto/rand \
 crypto/rc4 \
@ -901,8 +904,16 @@ PACKAGES = \
 go/scanner \
 go/token \
 go/types \
+golang_org/x/crypto/chacha20poly1305 \
+golang_org/x/crypto/chacha20poly1305/internal/chacha20 \
+golang_org/x/crypto/curve25519 \
+golang_org/x/crypto/poly1305 \
 golang_org/x/net/http2/hpack \
+golang_org/x/net/idna \
 golang_org/x/net/lex/httplex \
+golang_org/x/text/transform \
+golang_org/x/text/unicode/norm \
+golang_org/x/text/width \
 hash \
 hash/adler32 \
 hash/crc32 \
@ -920,6 +931,7 @@ PACKAGES = \
 image/png \
 index/suffixarray \
 internal/nettrace \
+internal/pprof/profile \
 internal/race \
 internal/singleflight \
 internal/syscall/unix \
@ -967,6 +979,8 @@ PACKAGES = \
 runtime/internal/atomic \
 runtime/internal/sys \
 runtime/pprof \
+runtime/pprof/internal/protopprof \
+runtime/trace \
 sort \
 strconv \
 strings \
@ -974,6 +988,7 @@ PACKAGES = \
 sync/atomic \
 syscall \
 testing \
+testing/internal/testdeps \
 testing/iotest \
 testing/quick \
 text/scanner \
@ -994,6 +1009,7 @@ libgo_go_objs = \
 syscall/errno.lo \
 syscall/signame.lo \
 syscall/wait.lo \
+$(golang_org_x_net_lif_lo) \
 $(golang_org_x_net_route_lo) \
 log/syslog/syslog_c.lo \
 $(os_lib_inotify_lo) \
@ -1161,6 +1177,12 @@ extra_go_files_runtime_internal_sys = version.go
 @LIBGO_IS_BSD_TRUE@golang_org_x_net_route_check = \
 @LIBGO_IS_BSD_TRUE@ golang_org/x/net/route/check
+@LIBGO_IS_SOLARIS_TRUE@golang_org_x_net_lif_lo = \
+@LIBGO_IS_SOLARIS_TRUE@ golang_org/x/net/lif/lif.lo
+@LIBGO_IS_SOLARIS_TRUE@golang_org_x_net_lif_check = \
+@LIBGO_IS_SOLARIS_TRUE@ golang_org/x/net/lif/check
 TEST_PACKAGES = \
 bufio/check \
 bytes/check \
@ -1248,8 +1270,14 @@ TEST_PACKAGES = \
 go/scanner/check \
 go/token/check \
 go/types/check \
+golang_org/x/crypto/chacha20poly1305/check \
+golang_org/x/crypto/chacha20poly1305/internal/chacha20/check \
+golang_org/x/crypto/curve25519/check \
+golang_org/x/crypto/poly1305/check \
 golang_org/x/net/http2/hpack/check \
+golang_org/x/net/idna/check \
 golang_org/x/net/lex/httplex/check \
+$(golang_org_x_net_lif_check) \
 $(golang_org_x_net_route_check) \
 hash/adler32/check \
 hash/crc32/check \
@ -1260,6 +1288,7 @@ TEST_PACKAGES = \
 image/jpeg/check \
 image/png/check \
 index/suffixarray/check \
+internal/pprof/profile/check \
 internal/singleflight/check \
 internal/trace/check \
 io/ioutil/check \
@ -1290,9 +1319,10 @@ TEST_PACKAGES = \
 path/filepath/check \
 regexp/syntax/check \
 runtime/debug/check \
-runtime/pprof/check \
 runtime/internal/atomic/check \
 runtime/internal/sys/check \
+runtime/pprof/check \
+runtime/pprof/internal/protopprof/check \
 sync/atomic/check \
 text/scanner/check \
 text/tabwriter/check \
@ -3316,7 +3346,11 @@ syscall/wait.lo: go/syscall/wait.c runtime.inc
 # Build golang_org/x/net/route only on BSD systems.
-@LIBGO_IS_BSD_TRUE@$(eval $(call PACKAGE_template,golang_org/x/net/route)
+@LIBGO_IS_BSD_TRUE@$(eval $(call PACKAGE_template,golang_org/x/net/route))
+# Build golang_org/x/net/lif only on Solaris systems.
+@LIBGO_IS_SOLARIS_TRUE@$(eval $(call PACKAGE_template,golang_org/x/net/lif))
 check: check-tail
 check-recursive: check-head


@ -1 +1 @@
-go1.7.1
+go1.8rc1


@ -13,7 +13,6 @@
 package tar
 import (
-"bytes"
 "errors"
 "fmt"
 "os"
@ -21,6 +20,10 @@ import (
 "time"
 )
+// BUG: Use of the Uid and Gid fields in Header could overflow on 32-bit
+// architectures. If a large value is encountered when decoding, the result
+// stored in Header will be the truncated version.
 // Header type flags.
 const (
 TypeReg = '0' // regular file
@ -271,28 +274,6 @@ func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) {
 return h, nil
 }
-func isASCII(s string) bool {
-for _, c := range s {
-if c >= 0x80 {
-return false
-}
-}
-return true
-}
-func toASCII(s string) string {
-if isASCII(s) {
-return s
-}
-var buf bytes.Buffer
-for _, c := range s {
-if c < 0x80 {
-buf.WriteByte(byte(c))
-}
-}
-return buf.String()
-}
 // isHeaderOnlyType checks if the given type flag is of the type that has no
 // data section even if a size is specified.
 func isHeaderOnlyType(flag byte) bool {
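
The new BUG note above concerns Header.Uid and Header.Gid, which are plain ints. A standard read loop over this package that touches them is shown below for context; example.tar is a placeholder archive name.

package main

import (
	"archive/tar"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"os"
)

func main() {
	f, err := os.Open("example.tar") // placeholder input file
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	tr := tar.NewReader(f)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break // end of archive
		}
		if err != nil {
			log.Fatal(err)
		}
		// Uid and Gid are int, so very large PAX values can truncate on
		// 32-bit platforms, as the BUG note says.
		fmt.Printf("%s uid=%d gid=%d size=%d\n", hdr.Name, hdr.Uid, hdr.Gid, hdr.Size)
		if _, err := io.Copy(ioutil.Discard, tr); err != nil {
			log.Fatal(err)
		}
	}
}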


@ -22,22 +22,20 @@ var (
 ErrHeader = errors.New("archive/tar: invalid tar header")
 )
-const maxNanoSecondIntSize = 9
 // A Reader provides sequential access to the contents of a tar archive.
 // A tar archive consists of a sequence of files.
 // The Next method advances to the next file in the archive (including the first),
 // and then it can be treated as an io.Reader to access the file's data.
 type Reader struct {
 r io.Reader
-err error
 pad int64 // amount of padding (ignored) after current file entry
 curr numBytesReader // reader for current file entry
 blk block // buffer to use as temporary local storage
-}
-type parser struct {
-err error // Last error seen
+// err is a persistent error.
+// It is only the responsibility of every exported method of Reader to
+// ensure that this error is sticky.
+err error
 }
 // A numBytesReader is an io.Reader with a numBytes method, returning the number
@ -108,8 +106,12 @@ func (tr *Reader) Next() (*Header, error) {
 if tr.err != nil {
 return nil, tr.err
 }
-var hdr *Header
+hdr, err := tr.next()
+tr.err = err
+return hdr, err
+}
+func (tr *Reader) next() (*Header, error) {
 var extHdrs map[string]string
 // Externally, Next iterates through the tar archive as if it is a series of
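
The Next/next split above is the usual sticky-error pattern: the exported method records the first failure so every later call returns it without doing more work. In isolation (the names here are illustrative, not from the package):

package main

import (
	"errors"
	"fmt"
)

type reader struct {
	err error // sticky: once set, all future calls fail with it
}

func (r *reader) Next() (string, error) {
	if r.err != nil {
		return "", r.err
	}
	hdr, err := r.next()
	r.err = err // remember the first failure
	return hdr, err
}

// next does the real work; here it simply fails once to show the effect.
func (r *reader) next() (string, error) {
	return "", errors.New("boom")
}

func main() {
	r := &reader{}
	_, err1 := r.Next()
	_, err2 := r.Next() // does no work; returns the remembered error
	fmt.Println(err1, err2)
}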
@ -119,29 +121,29 @@ func (tr *Reader) Next() (*Header, error) {
 // one or more "header files" until it finds a "normal file".
 loop:
 for {
-tr.err = tr.skipUnread()
-if tr.err != nil {
-return nil, tr.err
+if err := tr.skipUnread(); err != nil {
+return nil, err
 }
-hdr = tr.readHeader()
-if tr.err != nil {
-return nil, tr.err
+hdr, rawHdr, err := tr.readHeader()
+if err != nil {
+return nil, err
+}
+if err := tr.handleRegularFile(hdr); err != nil {
+return nil, err
 }
 // Check for PAX/GNU special headers and files.
 switch hdr.Typeflag {
 case TypeXHeader:
-extHdrs, tr.err = parsePAX(tr)
-if tr.err != nil {
-return nil, tr.err
+extHdrs, err = parsePAX(tr)
+if err != nil {
+return nil, err
 }
 continue loop // This is a meta header affecting the next header
 case TypeGNULongName, TypeGNULongLink:
-var realname []byte
-realname, tr.err = ioutil.ReadAll(tr)
-if tr.err != nil {
-return nil, tr.err
+realname, err := ioutil.ReadAll(tr)
+if err != nil {
+return nil, err
 }
 // Convert GNU extensions to use PAX headers.
@ -156,31 +158,73 @@ loop:
 extHdrs[paxLinkpath] = p.parseString(realname)
 }
 if p.err != nil {
-tr.err = p.err
-return nil, tr.err
+return nil, p.err
 }
 continue loop // This is a meta header affecting the next header
 default:
-mergePAX(hdr, extHdrs)
-// Check for a PAX format sparse file
-sp, err := tr.checkForGNUSparsePAXHeaders(hdr, extHdrs)
-if err != nil {
-tr.err = err
+// The old GNU sparse format is handled here since it is technically
+// just a regular file with additional attributes.
+if err := mergePAX(hdr, extHdrs); err != nil {
 return nil, err
 }
-if sp != nil {
-// Current file is a PAX format GNU sparse file.
-// Set the current file reader to a sparse file reader.
-tr.curr, tr.err = newSparseFileReader(tr.curr, sp, hdr.Size)
-if tr.err != nil {
-return nil, tr.err
-}
-}
-break loop // This is a file, so stop
+// The extended headers may have updated the size.
+// Thus, setup the regFileReader again after merging PAX headers.
+if err := tr.handleRegularFile(hdr); err != nil {
+return nil, err
+}
+// Sparse formats rely on being able to read from the logical data
+// section; there must be a preceding call to handleRegularFile.
+if err := tr.handleSparseFile(hdr, rawHdr, extHdrs); err != nil {
+return nil, err
+}
+return hdr, nil // This is a file, so stop
 }
 }
-return hdr, nil
+}
+// handleRegularFile sets up the current file reader and padding such that it
+// can only read the following logical data section. It will properly handle
+// special headers that contain no data section.
+func (tr *Reader) handleRegularFile(hdr *Header) error {
+nb := hdr.Size
+if isHeaderOnlyType(hdr.Typeflag) {
+nb = 0
+}
+if nb < 0 {
+return ErrHeader
+}
+tr.pad = -nb & (blockSize - 1) // blockSize is a power of two
+tr.curr = &regFileReader{r: tr.r, nb: nb}
+return nil
+}
+// handleSparseFile checks if the current file is a sparse format of any type
+// and sets the curr reader appropriately.
+func (tr *Reader) handleSparseFile(hdr *Header, rawHdr *block, extHdrs map[string]string) error {
+var sp []sparseEntry
+var err error
+if hdr.Typeflag == TypeGNUSparse {
+sp, err = tr.readOldGNUSparseMap(hdr, rawHdr)
+if err != nil {
+return err
+}
+} else {
+sp, err = tr.checkForGNUSparsePAXHeaders(hdr, extHdrs)
+if err != nil {
+return err
+}
+}
+// If sp is non-nil, then this is a sparse file.
+// Note that it is possible for len(sp) to be zero.
+if sp != nil {
+tr.curr, err = newSparseFileReader(tr.curr, sp, hdr.Size)
+}
+return err
 }
 // checkForGNUSparsePAXHeaders checks the PAX headers for GNU sparse headers. If they are found, then
@ -219,13 +263,13 @@ func (tr *Reader) checkForGNUSparsePAXHeaders(hdr *Header, headers map[string]st
 hdr.Name = sparseName
 }
 if sparseSizeOk {
-realSize, err := strconv.ParseInt(sparseSize, 10, 0)
+realSize, err := strconv.ParseInt(sparseSize, 10, 64)
 if err != nil {
 return nil, ErrHeader
 }
 hdr.Size = realSize
 } else if sparseRealSizeOk {
-realSize, err := strconv.ParseInt(sparseRealSize, 10, 0)
+realSize, err := strconv.ParseInt(sparseRealSize, 10, 64)
 if err != nil {
 return nil, ErrHeader
 }
@ -249,53 +293,32 @@ func (tr *Reader) checkForGNUSparsePAXHeaders(hdr *Header, headers map[string]st
 // in the header struct overwrite those found in the header
 // struct with higher precision or longer values. Esp. useful
 // for name and linkname fields.
-func mergePAX(hdr *Header, headers map[string]string) error {
+func mergePAX(hdr *Header, headers map[string]string) (err error) {
+var id64 int64
 for k, v := range headers {
 switch k {
 case paxPath:
 hdr.Name = v
 case paxLinkpath:
 hdr.Linkname = v
-case paxGname:
-hdr.Gname = v
 case paxUname:
 hdr.Uname = v
+case paxGname:
+hdr.Gname = v
 case paxUid:
-uid, err := strconv.ParseInt(v, 10, 0)
-if err != nil {
-return err
-}
-hdr.Uid = int(uid)
+id64, err = strconv.ParseInt(v, 10, 64)
+hdr.Uid = int(id64) // Integer overflow possible
 case paxGid:
-gid, err := strconv.ParseInt(v, 10, 0)
-if err != nil {
-return err
-}
-hdr.Gid = int(gid)
+id64, err = strconv.ParseInt(v, 10, 64)
+hdr.Gid = int(id64) // Integer overflow possible
 case paxAtime:
-t, err := parsePAXTime(v)
-if err != nil {
-return err
-}
-hdr.AccessTime = t
+hdr.AccessTime, err = parsePAXTime(v)
 case paxMtime:
-t, err := parsePAXTime(v)
-if err != nil {
-return err
-}
-hdr.ModTime = t
+hdr.ModTime, err = parsePAXTime(v)
 case paxCtime:
-t, err := parsePAXTime(v)
-if err != nil {
-return err
-}
-hdr.ChangeTime = t
+hdr.ChangeTime, err = parsePAXTime(v)
 case paxSize:
-size, err := strconv.ParseInt(v, 10, 0)
-if err != nil {
-return err
-}
-hdr.Size = size
+hdr.Size, err = strconv.ParseInt(v, 10, 64)
 default:
 if strings.HasPrefix(k, paxXattr) {
 if hdr.Xattrs == nil {
@ -304,46 +327,13 @@ func mergePAX(hdr *Header, headers map[string]string) error {
 hdr.Xattrs[k[len(paxXattr):]] = v
 }
 }
+if err != nil {
+return ErrHeader
+}
 }
 return nil
 }
-// parsePAXTime takes a string of the form %d.%d as described in
-// the PAX specification.
-func parsePAXTime(t string) (time.Time, error) {
-buf := []byte(t)
-pos := bytes.IndexByte(buf, '.')
-var seconds, nanoseconds int64
-var err error
-if pos == -1 {
-seconds, err = strconv.ParseInt(t, 10, 0)
-if err != nil {
-return time.Time{}, err
-}
-} else {
-seconds, err = strconv.ParseInt(string(buf[:pos]), 10, 0)
-if err != nil {
-return time.Time{}, err
-}
-nanoBuf := string(buf[pos+1:])
-// Pad as needed before converting to a decimal.
-// For example .030 -> .030000000 -> 30000000 nanoseconds
-if len(nanoBuf) < maxNanoSecondIntSize {
-// Right pad
-nanoBuf += strings.Repeat("0", maxNanoSecondIntSize-len(nanoBuf))
-} else if len(nanoBuf) > maxNanoSecondIntSize {
-// Right truncate
-nanoBuf = nanoBuf[:maxNanoSecondIntSize]
-}
-nanoseconds, err = strconv.ParseInt(nanoBuf, 10, 0)
-if err != nil {
-return time.Time{}, err
-}
-}
-ts := time.Unix(seconds, nanoseconds)
-return ts, nil
-}
 // parsePAX parses PAX headers.
 // If an extended header (type 'x') is invalid, ErrHeader is returned
 func parsePAX(r io.Reader) (map[string]string, error) {
@ -354,12 +344,11 @@ func parsePAX(r io.Reader) (map[string]string, error) {
 sbuf := string(buf)
 // For GNU PAX sparse format 0.0 support.
-// This function transforms the sparse format 0.0 headers into sparse format 0.1 headers.
-var sparseMap bytes.Buffer
-headers := make(map[string]string)
-// Each record is constructed as
-// "%d %s=%s\n", length, keyword, value
+// This function transforms the sparse format 0.0 headers into format 0.1
+// headers since 0.0 headers were not PAX compliant.
+var sparseMap []string
+extHdrs := make(map[string]string)
 for len(sbuf) > 0 {
 key, value, residual, err := parsePAXRecord(sbuf)
 if err != nil {
@ -367,127 +356,29 @@ func parsePAX(r io.Reader) (map[string]string, error) {
 }
 sbuf = residual
-keyStr := key
-if keyStr == paxGNUSparseOffset || keyStr == paxGNUSparseNumBytes {
-// GNU sparse format 0.0 special key. Write to sparseMap instead of using the headers map.
-sparseMap.WriteString(value)
-sparseMap.Write([]byte{','})
-} else {
-// Normal key. Set the value in the headers map.
-headers[keyStr] = value
-}
-}
-if sparseMap.Len() != 0 {
-// Add sparse info to headers, chopping off the extra comma
-sparseMap.Truncate(sparseMap.Len() - 1)
-headers[paxGNUSparseMap] = sparseMap.String()
-}
-return headers, nil
-}
-// parsePAXRecord parses the input PAX record string into a key-value pair.
-// If parsing is successful, it will slice off the currently read record and
-// return the remainder as r.
-//
-// A PAX record is of the following form:
-// "%d %s=%s\n" % (size, key, value)
-func parsePAXRecord(s string) (k, v, r string, err error) {
-// The size field ends at the first space.
-sp := strings.IndexByte(s, ' ')
-if sp == -1 {
-return "", "", s, ErrHeader
-}
-// Parse the first token as a decimal integer.
-n, perr := strconv.ParseInt(s[:sp], 10, 0) // Intentionally parse as native int
-if perr != nil || n < 5 || int64(len(s)) < n {
-return "", "", s, ErrHeader
-}
-// Extract everything between the space and the final newline.
-rec, nl, rem := s[sp+1:n-1], s[n-1:n], s[n:]
-if nl != "\n" {
-return "", "", s, ErrHeader
-}
-// The first equals separates the key from the value.
-eq := strings.IndexByte(rec, '=')
-if eq == -1 {
-return "", "", s, ErrHeader
-}
-return rec[:eq], rec[eq+1:], rem, nil
-}
-// parseString parses bytes as a NUL-terminated C-style string.
-// If a NUL byte is not found then the whole slice is returned as a string.
-func (*parser) parseString(b []byte) string {
-n := 0
-for n < len(b) && b[n] != 0 {
-n++
-}
-return string(b[0:n])
-}
-// parseNumeric parses the input as being encoded in either base-256 or octal.
-// This function may return negative numbers.
-// If parsing fails or an integer overflow occurs, err will be set.
-func (p *parser) parseNumeric(b []byte) int64 {
-// Check for base-256 (binary) format first.
-// If the first bit is set, then all following bits constitute a two's
-// complement encoded number in big-endian byte order.
-if len(b) > 0 && b[0]&0x80 != 0 {
-// Handling negative numbers relies on the following identity:
-// -a-1 == ^a
-//
-// If the number is negative, we use an inversion mask to invert the
-// data bytes and treat the value as an unsigned number.
-var inv byte // 0x00 if positive or zero, 0xff if negative
-if b[0]&0x40 != 0 {
-inv = 0xff
-}
-var x uint64
-for i, c := range b {
-c ^= inv // Inverts c only if inv is 0xff, otherwise does nothing
-if i == 0 {
-c &= 0x7f // Ignore signal bit in first byte
-}
-if (x >> 56) > 0 {
-p.err = ErrHeader // Integer overflow
-return 0
-}
-x = x<<8 | uint64(c)
-}
-if (x >> 63) > 0 {
-p.err = ErrHeader // Integer overflow
-return 0
-}
-if inv == 0xff {
-return ^int64(x)
-}
-return int64(x)
-}
-// Normal case is base-8 (octal) format.
-return p.parseOctal(b)
-}
-func (p *parser) parseOctal(b []byte) int64 {
-// Because unused fields are filled with NULs, we need
-// to skip leading NULs. Fields may also be padded with
-// spaces or NULs.
-// So we remove leading and trailing NULs and spaces to
-// be sure.
-b = bytes.Trim(b, " \x00")
-if len(b) == 0 {
-return 0
-}
-x, perr := strconv.ParseUint(p.parseString(b), 8, 64)
-if perr != nil {
-p.err = ErrHeader
-}
-return int64(x)
+switch key {
+case paxGNUSparseOffset, paxGNUSparseNumBytes:
+// Validate sparse header order and value.
+if (len(sparseMap)%2 == 0 && key != paxGNUSparseOffset) ||
+(len(sparseMap)%2 == 1 && key != paxGNUSparseNumBytes) ||
+strings.Contains(value, ",") {
+return nil, ErrHeader
+}
+sparseMap = append(sparseMap, value)
+default:
+// According to PAX specification, a value is stored only if it is
+// non-empty. Otherwise, the key is deleted.
+if len(value) > 0 {
+extHdrs[key] = value
+} else {
+delete(extHdrs, key)
+}
+}
+}
+if len(sparseMap) > 0 {
+extHdrs[paxGNUSparseMap] = strings.Join(sparseMap, ",")
+}
+return extHdrs, nil
 }
 // skipUnread skips any unread bytes in the existing file entry, as well as any
@ -516,51 +407,46 @@ func (tr *Reader) skipUnread() error {
 // Seek seems supported, so perform the real Seek.
 pos2, err := sr.Seek(dataSkip-1, io.SeekCurrent)
 if err != nil {
-tr.err = err
-return tr.err
+return err
 }
 seekSkipped = pos2 - pos1
 }
 }
-var copySkipped int64 // Number of bytes skipped via CopyN
-copySkipped, tr.err = io.CopyN(ioutil.Discard, tr.r, totalSkip-seekSkipped)
-if tr.err == io.EOF && seekSkipped+copySkipped < dataSkip {
-tr.err = io.ErrUnexpectedEOF
+copySkipped, err := io.CopyN(ioutil.Discard, tr.r, totalSkip-seekSkipped)
+if err == io.EOF && seekSkipped+copySkipped < dataSkip {
+err = io.ErrUnexpectedEOF
 }
-return tr.err
+return err
 }
 // readHeader reads the next block header and assumes that the underlying reader
-// is already aligned to a block boundary.
+// is already aligned to a block boundary. It returns the raw block of the
+// header in case further processing is required.
 //
 // The err will be set to io.EOF only when one of the following occurs:
 // * Exactly 0 bytes are read and EOF is hit.
 // * Exactly 1 block of zeros is read and EOF is hit.
 // * At least 2 blocks of zeros are read.
-func (tr *Reader) readHeader() *Header {
-if _, tr.err = io.ReadFull(tr.r, tr.blk[:]); tr.err != nil {
-return nil // io.EOF is okay here
-}
+func (tr *Reader) readHeader() (*Header, *block, error) {
 // Two blocks of zero bytes marks the end of the archive.
+if _, err := io.ReadFull(tr.r, tr.blk[:]); err != nil {
+return nil, nil, err // EOF is okay here; exactly 0 bytes read
+}
 if bytes.Equal(tr.blk[:], zeroBlock[:]) {
-if _, tr.err = io.ReadFull(tr.r, tr.blk[:]); tr.err != nil {
-return nil // io.EOF is okay here
+if _, err := io.ReadFull(tr.r, tr.blk[:]); err != nil {
+return nil, nil, err // EOF is okay here; exactly 1 block of zeros read
 }
 if bytes.Equal(tr.blk[:], zeroBlock[:]) {
-tr.err = io.EOF
-} else {
-tr.err = ErrHeader // zero block and then non-zero block
+return nil, nil, io.EOF // normal EOF; exactly 2 block of zeros read
 }
-return nil
+return nil, nil, ErrHeader // Zero block and then non-zero block
 }
 // Verify the header matches a known format.
 format := tr.blk.GetFormat()
 if format == formatUnknown {
-tr.err = ErrHeader
-return nil
+return nil, nil, ErrHeader
 }
 var p parser
@ -577,6 +463,26 @@ func (tr *Reader) readHeader() *Header {
hdr.Typeflag = v7.TypeFlag()[0] hdr.Typeflag = v7.TypeFlag()[0]
hdr.Linkname = p.parseString(v7.LinkName()) hdr.Linkname = p.parseString(v7.LinkName())
// The atime and ctime fields are often left unused. Some versions of Go
// had a bug in the tar.Writer where it would output an invalid tar file
// in certain rare situations because the logic incorrectly believed that
// the old GNU format had a prefix field. This is wrong and leads to
// an outputted file that actually mangles the atime and ctime fields.
//
// In order to continue reading tar files created by a buggy writer, we
// try to parse the atime and ctime fields, but just return the zero value
// of time.Time when we cannot parse them.
//
// See https://golang.org/issues/12594
tryParseTime := func(b []byte) time.Time {
var p parser
n := p.parseNumeric(b)
if b[0] != 0x00 && p.err == nil {
return time.Unix(n, 0)
}
return time.Time{}
}
// Unpack format specific fields. // Unpack format specific fields.
if format > formatV7 { if format > formatV7 {
ustar := tr.blk.USTAR() ustar := tr.blk.USTAR()
@ -589,9 +495,7 @@ func (tr *Reader) readHeader() *Header {
var prefix string var prefix string
switch format { switch format {
case formatUSTAR, formatGNU: case formatUSTAR:
// TODO(dsnet): Do not use the prefix field for the GNU format!
// See golang.org/issues/12594
ustar := tr.blk.USTAR() ustar := tr.blk.USTAR()
prefix = p.parseString(ustar.Prefix()) prefix = p.parseString(ustar.Prefix())
case formatSTAR: case formatSTAR:
@ -599,97 +503,68 @@ func (tr *Reader) readHeader() *Header {
prefix = p.parseString(star.Prefix()) prefix = p.parseString(star.Prefix())
hdr.AccessTime = time.Unix(p.parseNumeric(star.AccessTime()), 0) hdr.AccessTime = time.Unix(p.parseNumeric(star.AccessTime()), 0)
hdr.ChangeTime = time.Unix(p.parseNumeric(star.ChangeTime()), 0) hdr.ChangeTime = time.Unix(p.parseNumeric(star.ChangeTime()), 0)
case formatGNU:
gnu := tr.blk.GNU()
hdr.AccessTime = tryParseTime(gnu.AccessTime())
hdr.ChangeTime = tryParseTime(gnu.ChangeTime())
} }
if len(prefix) > 0 { if len(prefix) > 0 {
hdr.Name = prefix + "/" + hdr.Name hdr.Name = prefix + "/" + hdr.Name
} }
} }
return hdr, &tr.blk, p.err
nb := hdr.Size
if isHeaderOnlyType(hdr.Typeflag) {
nb = 0
}
if nb < 0 {
tr.err = ErrHeader
return nil
}
// Set the current file reader.
tr.pad = -nb & (blockSize - 1) // blockSize is a power of two
tr.curr = &regFileReader{r: tr.r, nb: nb}
// Check for old GNU sparse format entry.
if hdr.Typeflag == TypeGNUSparse {
// Get the real size of the file.
hdr.Size = p.parseNumeric(tr.blk.GNU().RealSize())
if p.err != nil {
tr.err = p.err
return nil
}
// Read the sparse map.
sp := tr.readOldGNUSparseMap(&tr.blk)
if tr.err != nil {
return nil
}
// Current file is a GNU sparse file. Update the current file reader.
tr.curr, tr.err = newSparseFileReader(tr.curr, sp, hdr.Size)
if tr.err != nil {
return nil
}
}
if p.err != nil {
tr.err = p.err
return nil
}
return hdr
} }
// readOldGNUSparseMap reads the sparse map as stored in the old GNU sparse format.
// The sparse map is stored in the tar header if it's small enough. If it's larger than four entries,
// then one or more extension headers are used to store the rest of the sparse map.
func (tr *Reader) readOldGNUSparseMap(blk *block) []sparseEntry {
	var p parser
	var s sparseArray = blk.GNU().Sparse()
	var sp = make([]sparseEntry, 0, s.MaxEntries())
	for i := 0; i < s.MaxEntries(); i++ {
		offset := p.parseOctal(s.Entry(i).Offset())
		numBytes := p.parseOctal(s.Entry(i).NumBytes())
		if p.err != nil {
			tr.err = p.err
			return nil
		}
		if offset == 0 && numBytes == 0 {
			break
		}
		sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
	}

	for s.IsExtended()[0] > 0 {
		// There are more entries. Read an extension header and parse its entries.
		var blk block
		if _, tr.err = io.ReadFull(tr.r, blk[:]); tr.err != nil {
			return nil
		}
		s = blk.Sparse()

		for i := 0; i < s.MaxEntries(); i++ {
			offset := p.parseOctal(s.Entry(i).Offset())
			numBytes := p.parseOctal(s.Entry(i).NumBytes())
			if p.err != nil {
				tr.err = p.err
				return nil
			}
			if offset == 0 && numBytes == 0 {
				break
			}
			sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
		}
	}
	return sp
}

// readOldGNUSparseMap reads the sparse map from the old GNU sparse format.
// The sparse map is stored in the tar header if it's small enough.
// If it's larger than four entries, then one or more extension headers are used
// to store the rest of the sparse map.
//
// The Header.Size does not reflect the size of any extended headers used.
// Thus, this function will read from the raw io.Reader to fetch extra headers.
// This method mutates blk in the process.
func (tr *Reader) readOldGNUSparseMap(hdr *Header, blk *block) ([]sparseEntry, error) {
	// Make sure that the input format is GNU.
	// Unfortunately, the STAR format also has a sparse header format that uses
	// the same type flag but has a completely different layout.
	if blk.GetFormat() != formatGNU {
		return nil, ErrHeader
	}

	var p parser
	hdr.Size = p.parseNumeric(blk.GNU().RealSize())
	if p.err != nil {
		return nil, p.err
	}
	var s sparseArray = blk.GNU().Sparse()
	var sp = make([]sparseEntry, 0, s.MaxEntries())
	for {
		for i := 0; i < s.MaxEntries(); i++ {
			// This termination condition is identical to GNU and BSD tar.
			if s.Entry(i).Offset()[0] == 0x00 {
				break // Don't return, need to process extended headers (even if empty)
			}
			offset := p.parseNumeric(s.Entry(i).Offset())
			numBytes := p.parseNumeric(s.Entry(i).NumBytes())
			if p.err != nil {
				return nil, p.err
			}
			sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
		}

		if s.IsExtended()[0] > 0 {
			// There are more entries. Read an extension header and parse its entries.
			if _, err := io.ReadFull(tr.r, blk[:]); err != nil {
				if err == io.EOF {
					err = io.ErrUnexpectedEOF
				}
				return nil, err
			}
			s = blk.Sparse()
			continue
		}
		return sp, nil // Done
	}
}
// readGNUSparseMap1x0 reads the sparse map as stored in GNU's PAX sparse format // readGNUSparseMap1x0 reads the sparse map as stored in GNU's PAX sparse format
@ -817,7 +692,7 @@ func (tr *Reader) numBytes() int64 {
// Calling Read on special types like TypeLink, TypeSymLink, TypeChar, // Calling Read on special types like TypeLink, TypeSymLink, TypeChar,
// TypeBlock, TypeDir, and TypeFifo returns 0, io.EOF regardless of what // TypeBlock, TypeDir, and TypeFifo returns 0, io.EOF regardless of what
// the Header.Size claims. // the Header.Size claims.
func (tr *Reader) Read(b []byte) (n int, err error) { func (tr *Reader) Read(b []byte) (int, error) {
if tr.err != nil { if tr.err != nil {
return 0, tr.err return 0, tr.err
} }
@ -825,11 +700,11 @@ func (tr *Reader) Read(b []byte) (n int, err error) {
return 0, io.EOF return 0, io.EOF
} }
n, err = tr.curr.Read(b) n, err := tr.curr.Read(b)
if err != nil && err != io.EOF { if err != nil && err != io.EOF {
tr.err = err tr.err = err
} }
return return n, err
} }
func (rfr *regFileReader) Read(b []byte) (n int, err error) { func (rfr *regFileReader) Read(b []byte) (n int, err error) {

View file

@ -18,17 +18,15 @@ import (
"time" "time"
) )
type untarTest struct { func TestReader(t *testing.T) {
file string // Test input file vectors := []struct {
headers []*Header // Expected output headers file string // Test input file
chksums []string // MD5 checksum of files, leave as nil if not checked headers []*Header // Expected output headers
err error // Expected error to occur chksums []string // MD5 checksum of files, leave as nil if not checked
} err error // Expected error to occur
}{{
var gnuTarTest = &untarTest{ file: "testdata/gnu.tar",
file: "testdata/gnu.tar", headers: []*Header{{
headers: []*Header{
{
Name: "small.txt", Name: "small.txt",
Mode: 0640, Mode: 0640,
Uid: 73025, Uid: 73025,
@ -38,8 +36,7 @@ var gnuTarTest = &untarTest{
Typeflag: '0', Typeflag: '0',
Uname: "dsymonds", Uname: "dsymonds",
Gname: "eng", Gname: "eng",
}, }, {
{
Name: "small2.txt", Name: "small2.txt",
Mode: 0640, Mode: 0640,
Uid: 73025, Uid: 73025,
@ -49,18 +46,14 @@ var gnuTarTest = &untarTest{
Typeflag: '0', Typeflag: '0',
Uname: "dsymonds", Uname: "dsymonds",
Gname: "eng", Gname: "eng",
}},
chksums: []string{
"e38b27eaccb4391bdec553a7f3ae6b2f",
"c65bd2e50a56a2138bf1716f2fd56fe9",
}, },
}, }, {
chksums: []string{ file: "testdata/sparse-formats.tar",
"e38b27eaccb4391bdec553a7f3ae6b2f", headers: []*Header{{
"c65bd2e50a56a2138bf1716f2fd56fe9",
},
}
var sparseTarTest = &untarTest{
file: "testdata/sparse-formats.tar",
headers: []*Header{
{
Name: "sparse-gnu", Name: "sparse-gnu",
Mode: 420, Mode: 420,
Uid: 1000, Uid: 1000,
@ -73,8 +66,7 @@ var sparseTarTest = &untarTest{
Gname: "david", Gname: "david",
Devmajor: 0, Devmajor: 0,
Devminor: 0, Devminor: 0,
}, }, {
{
Name: "sparse-posix-0.0", Name: "sparse-posix-0.0",
Mode: 420, Mode: 420,
Uid: 1000, Uid: 1000,
@ -87,8 +79,7 @@ var sparseTarTest = &untarTest{
Gname: "david", Gname: "david",
Devmajor: 0, Devmajor: 0,
Devminor: 0, Devminor: 0,
}, }, {
{
Name: "sparse-posix-0.1", Name: "sparse-posix-0.1",
Mode: 420, Mode: 420,
Uid: 1000, Uid: 1000,
@ -101,8 +92,7 @@ var sparseTarTest = &untarTest{
Gname: "david", Gname: "david",
Devmajor: 0, Devmajor: 0,
Devminor: 0, Devminor: 0,
}, }, {
{
Name: "sparse-posix-1.0", Name: "sparse-posix-1.0",
Mode: 420, Mode: 420,
Uid: 1000, Uid: 1000,
@ -115,8 +105,7 @@ var sparseTarTest = &untarTest{
Gname: "david", Gname: "david",
Devmajor: 0, Devmajor: 0,
Devminor: 0, Devminor: 0,
}, }, {
{
Name: "end", Name: "end",
Mode: 420, Mode: 420,
Uid: 1000, Uid: 1000,
@ -129,209 +118,237 @@ var sparseTarTest = &untarTest{
Gname: "david", Gname: "david",
Devmajor: 0, Devmajor: 0,
Devminor: 0, Devminor: 0,
}},
chksums: []string{
"6f53234398c2449fe67c1812d993012f",
"6f53234398c2449fe67c1812d993012f",
"6f53234398c2449fe67c1812d993012f",
"6f53234398c2449fe67c1812d993012f",
"b0061974914468de549a2af8ced10316",
}, },
}, }, {
chksums: []string{
"6f53234398c2449fe67c1812d993012f",
"6f53234398c2449fe67c1812d993012f",
"6f53234398c2449fe67c1812d993012f",
"6f53234398c2449fe67c1812d993012f",
"b0061974914468de549a2af8ced10316",
},
}
var untarTests = []*untarTest{
gnuTarTest,
sparseTarTest,
{
file: "testdata/star.tar", file: "testdata/star.tar",
headers: []*Header{ headers: []*Header{{
{ Name: "small.txt",
Name: "small.txt", Mode: 0640,
Mode: 0640, Uid: 73025,
Uid: 73025, Gid: 5000,
Gid: 5000, Size: 5,
Size: 5, ModTime: time.Unix(1244592783, 0),
ModTime: time.Unix(1244592783, 0), Typeflag: '0',
Typeflag: '0', Uname: "dsymonds",
Uname: "dsymonds", Gname: "eng",
Gname: "eng", AccessTime: time.Unix(1244592783, 0),
AccessTime: time.Unix(1244592783, 0), ChangeTime: time.Unix(1244592783, 0),
ChangeTime: time.Unix(1244592783, 0), }, {
}, Name: "small2.txt",
{ Mode: 0640,
Name: "small2.txt", Uid: 73025,
Mode: 0640, Gid: 5000,
Uid: 73025, Size: 11,
Gid: 5000, ModTime: time.Unix(1244592783, 0),
Size: 11, Typeflag: '0',
ModTime: time.Unix(1244592783, 0), Uname: "dsymonds",
Typeflag: '0', Gname: "eng",
Uname: "dsymonds", AccessTime: time.Unix(1244592783, 0),
Gname: "eng", ChangeTime: time.Unix(1244592783, 0),
AccessTime: time.Unix(1244592783, 0), }},
ChangeTime: time.Unix(1244592783, 0), }, {
},
},
},
{
file: "testdata/v7.tar", file: "testdata/v7.tar",
headers: []*Header{ headers: []*Header{{
{ Name: "small.txt",
Name: "small.txt", Mode: 0444,
Mode: 0444, Uid: 73025,
Uid: 73025, Gid: 5000,
Gid: 5000, Size: 5,
Size: 5, ModTime: time.Unix(1244593104, 0),
ModTime: time.Unix(1244593104, 0), Typeflag: '\x00',
Typeflag: '\x00', }, {
}, Name: "small2.txt",
{ Mode: 0444,
Name: "small2.txt", Uid: 73025,
Mode: 0444, Gid: 5000,
Uid: 73025, Size: 11,
Gid: 5000, ModTime: time.Unix(1244593104, 0),
Size: 11, Typeflag: '\x00',
ModTime: time.Unix(1244593104, 0), }},
Typeflag: '\x00', }, {
},
},
},
{
file: "testdata/pax.tar", file: "testdata/pax.tar",
headers: []*Header{ headers: []*Header{{
{ Name: "a/123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100",
Name: "a/123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100", Mode: 0664,
Mode: 0664, Uid: 1000,
Uid: 1000, Gid: 1000,
Gid: 1000, Uname: "shane",
Uname: "shane", Gname: "shane",
Gname: "shane", Size: 7,
Size: 7, ModTime: time.Unix(1350244992, 23960108),
ModTime: time.Unix(1350244992, 23960108), ChangeTime: time.Unix(1350244992, 23960108),
ChangeTime: time.Unix(1350244992, 23960108), AccessTime: time.Unix(1350244992, 23960108),
AccessTime: time.Unix(1350244992, 23960108), Typeflag: TypeReg,
Typeflag: TypeReg, }, {
}, Name: "a/b",
{ Mode: 0777,
Name: "a/b", Uid: 1000,
Mode: 0777, Gid: 1000,
Uid: 1000, Uname: "shane",
Gid: 1000, Gname: "shane",
Uname: "shane", Size: 0,
Gname: "shane", ModTime: time.Unix(1350266320, 910238425),
Size: 0, ChangeTime: time.Unix(1350266320, 910238425),
ModTime: time.Unix(1350266320, 910238425), AccessTime: time.Unix(1350266320, 910238425),
ChangeTime: time.Unix(1350266320, 910238425), Typeflag: TypeSymlink,
AccessTime: time.Unix(1350266320, 910238425), Linkname: "123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100",
Typeflag: TypeSymlink, }},
Linkname: "123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100", }, {
}, file: "testdata/pax-bad-hdr-file.tar",
err: ErrHeader,
}, {
file: "testdata/pax-bad-mtime-file.tar",
err: ErrHeader,
}, {
file: "testdata/pax-pos-size-file.tar",
headers: []*Header{{
Name: "foo",
Mode: 0640,
Uid: 319973,
Gid: 5000,
Size: 999,
ModTime: time.Unix(1442282516, 0),
Typeflag: '0',
Uname: "joetsai",
Gname: "eng",
}},
chksums: []string{
"0afb597b283fe61b5d4879669a350556",
}, },
}, }, {
{
file: "testdata/nil-uid.tar", // golang.org/issue/5290 file: "testdata/nil-uid.tar", // golang.org/issue/5290
headers: []*Header{ headers: []*Header{{
{ Name: "P1050238.JPG.log",
Name: "P1050238.JPG.log", Mode: 0664,
Mode: 0664, Uid: 0,
Uid: 0, Gid: 0,
Gid: 0, Size: 14,
Size: 14, ModTime: time.Unix(1365454838, 0),
ModTime: time.Unix(1365454838, 0), Typeflag: TypeReg,
Typeflag: TypeReg, Linkname: "",
Linkname: "", Uname: "eyefi",
Uname: "eyefi", Gname: "eyefi",
Gname: "eyefi", Devmajor: 0,
Devmajor: 0, Devminor: 0,
Devminor: 0, }},
}, }, {
},
},
{
file: "testdata/xattrs.tar", file: "testdata/xattrs.tar",
headers: []*Header{ headers: []*Header{{
{ Name: "small.txt",
Name: "small.txt", Mode: 0644,
Mode: 0644, Uid: 1000,
Uid: 1000, Gid: 10,
Gid: 10, Size: 5,
Size: 5, ModTime: time.Unix(1386065770, 448252320),
ModTime: time.Unix(1386065770, 448252320), Typeflag: '0',
Typeflag: '0', Uname: "alex",
Uname: "alex", Gname: "wheel",
Gname: "wheel", AccessTime: time.Unix(1389782991, 419875220),
AccessTime: time.Unix(1389782991, 419875220), ChangeTime: time.Unix(1389782956, 794414986),
ChangeTime: time.Unix(1389782956, 794414986), Xattrs: map[string]string{
Xattrs: map[string]string{ "user.key": "value",
"user.key": "value", "user.key2": "value2",
"user.key2": "value2", // Interestingly, selinux encodes the terminating null inside the xattr
// Interestingly, selinux encodes the terminating null inside the xattr "security.selinux": "unconfined_u:object_r:default_t:s0\x00",
"security.selinux": "unconfined_u:object_r:default_t:s0\x00",
},
}, },
{ }, {
Name: "small2.txt", Name: "small2.txt",
Mode: 0644, Mode: 0644,
Uid: 1000, Uid: 1000,
Gid: 10, Gid: 10,
Size: 11, Size: 11,
ModTime: time.Unix(1386065770, 449252304), ModTime: time.Unix(1386065770, 449252304),
Typeflag: '0', Typeflag: '0',
Uname: "alex", Uname: "alex",
Gname: "wheel", Gname: "wheel",
AccessTime: time.Unix(1389782991, 419875220), AccessTime: time.Unix(1389782991, 419875220),
ChangeTime: time.Unix(1386065770, 449252304), ChangeTime: time.Unix(1386065770, 449252304),
Xattrs: map[string]string{ Xattrs: map[string]string{
"security.selinux": "unconfined_u:object_r:default_t:s0\x00", "security.selinux": "unconfined_u:object_r:default_t:s0\x00",
},
}, },
}, }},
}, }, {
{
// Matches the behavior of GNU, BSD, and STAR tar utilities. // Matches the behavior of GNU, BSD, and STAR tar utilities.
file: "testdata/gnu-multi-hdrs.tar", file: "testdata/gnu-multi-hdrs.tar",
headers: []*Header{ headers: []*Header{{
{ Name: "GNU2/GNU2/long-path-name",
Name: "GNU2/GNU2/long-path-name", Linkname: "GNU4/GNU4/long-linkpath-name",
Linkname: "GNU4/GNU4/long-linkpath-name", ModTime: time.Unix(0, 0),
ModTime: time.Unix(0, 0), Typeflag: '2',
Typeflag: '2', }},
}, }, {
}, // GNU tar file with atime and ctime fields set.
}, // Created with the GNU tar v1.27.1.
{ // tar --incremental -S -cvf gnu-incremental.tar test2
file: "testdata/gnu-incremental.tar",
headers: []*Header{{
Name: "test2/",
Mode: 16877,
Uid: 1000,
Gid: 1000,
Size: 14,
ModTime: time.Unix(1441973427, 0),
Typeflag: 'D',
Uname: "rawr",
Gname: "dsnet",
AccessTime: time.Unix(1441974501, 0),
ChangeTime: time.Unix(1441973436, 0),
}, {
Name: "test2/foo",
Mode: 33188,
Uid: 1000,
Gid: 1000,
Size: 64,
ModTime: time.Unix(1441973363, 0),
Typeflag: '0',
Uname: "rawr",
Gname: "dsnet",
AccessTime: time.Unix(1441974501, 0),
ChangeTime: time.Unix(1441973436, 0),
}, {
Name: "test2/sparse",
Mode: 33188,
Uid: 1000,
Gid: 1000,
Size: 536870912,
ModTime: time.Unix(1441973427, 0),
Typeflag: 'S',
Uname: "rawr",
Gname: "dsnet",
AccessTime: time.Unix(1441991948, 0),
ChangeTime: time.Unix(1441973436, 0),
}},
}, {
// Matches the behavior of GNU and BSD tar utilities. // Matches the behavior of GNU and BSD tar utilities.
file: "testdata/pax-multi-hdrs.tar", file: "testdata/pax-multi-hdrs.tar",
headers: []*Header{ headers: []*Header{{
{ Name: "bar",
Name: "bar", Linkname: "PAX4/PAX4/long-linkpath-name",
Linkname: "PAX4/PAX4/long-linkpath-name", ModTime: time.Unix(0, 0),
ModTime: time.Unix(0, 0), Typeflag: '2',
Typeflag: '2', }},
}, }, {
},
},
{
file: "testdata/neg-size.tar", file: "testdata/neg-size.tar",
err: ErrHeader, err: ErrHeader,
}, }, {
{
file: "testdata/issue10968.tar", file: "testdata/issue10968.tar",
err: ErrHeader, err: ErrHeader,
}, }, {
{
file: "testdata/issue11169.tar", file: "testdata/issue11169.tar",
err: ErrHeader, err: ErrHeader,
}, }, {
{
file: "testdata/issue12435.tar", file: "testdata/issue12435.tar",
err: ErrHeader, err: ErrHeader,
}, }}
}
func TestReader(t *testing.T) { for i, v := range vectors {
for i, v := range untarTests {
f, err := os.Open(v.file) f, err := os.Open(v.file)
if err != nil { if err != nil {
t.Errorf("file %s, test %d: unexpected error: %v", v.file, i, err) t.Errorf("file %s, test %d: unexpected error: %v", v.file, i, err)
@ -440,83 +457,8 @@ func TestPartialRead(t *testing.T) {
} }
} }
func TestParsePAXHeader(t *testing.T) {
paxTests := [][3]string{
{"a", "a=name", "10 a=name\n"}, // Test case involving multiple acceptable lengths
{"a", "a=name", "9 a=name\n"}, // Test case involving multiple acceptable length
{"mtime", "mtime=1350244992.023960108", "30 mtime=1350244992.023960108\n"}}
for _, test := range paxTests {
key, expected, raw := test[0], test[1], test[2]
reader := bytes.NewReader([]byte(raw))
headers, err := parsePAX(reader)
if err != nil {
t.Errorf("Couldn't parse correctly formatted headers: %v", err)
continue
}
if strings.EqualFold(headers[key], expected) {
t.Errorf("mtime header incorrectly parsed: got %s, wanted %s", headers[key], expected)
continue
}
trailer := make([]byte, 100)
n, err := reader.Read(trailer)
if err != io.EOF || n != 0 {
t.Error("Buffer wasn't consumed")
}
}
badHeaderTests := [][]byte{
[]byte("3 somelongkey=\n"),
[]byte("50 tooshort=\n"),
}
for _, test := range badHeaderTests {
if _, err := parsePAX(bytes.NewReader(test)); err != ErrHeader {
t.Fatal("Unexpected success when parsing bad header")
}
}
}
func TestParsePAXTime(t *testing.T) {
// Some valid PAX time values
timestamps := map[string]time.Time{
"1350244992.023960108": time.Unix(1350244992, 23960108), // The common case
"1350244992.02396010": time.Unix(1350244992, 23960100), // Lower precision value
"1350244992.0239601089": time.Unix(1350244992, 23960108), // Higher precision value
"1350244992": time.Unix(1350244992, 0), // Low precision value
}
for input, expected := range timestamps {
ts, err := parsePAXTime(input)
if err != nil {
t.Fatal(err)
}
if !ts.Equal(expected) {
t.Fatalf("Time parsing failure %s %s", ts, expected)
}
}
}
func TestMergePAX(t *testing.T) {
hdr := new(Header)
// Test a string, integer, and time based value.
headers := map[string]string{
"path": "a/b/c",
"uid": "1000",
"mtime": "1350244992.023960108",
}
err := mergePAX(hdr, headers)
if err != nil {
t.Fatal(err)
}
want := &Header{
Name: "a/b/c",
Uid: 1000,
ModTime: time.Unix(1350244992, 23960108),
}
if !reflect.DeepEqual(hdr, want) {
t.Errorf("incorrect merge: got %+v, want %+v", hdr, want)
}
}
func TestSparseFileReader(t *testing.T) { func TestSparseFileReader(t *testing.T) {
var vectors = []struct { vectors := []struct {
realSize int64 // Real size of the output file realSize int64 // Real size of the output file
sparseMap []sparseEntry // Input sparse map sparseMap []sparseEntry // Input sparse map
sparseData string // Input compact data sparseData string // Input compact data
@ -639,9 +581,11 @@ func TestSparseFileReader(t *testing.T) {
r := bytes.NewReader([]byte(v.sparseData)) r := bytes.NewReader([]byte(v.sparseData))
rfr := &regFileReader{r: r, nb: int64(len(v.sparseData))} rfr := &regFileReader{r: r, nb: int64(len(v.sparseData))}
var sfr *sparseFileReader var (
var err error sfr *sparseFileReader
var buf []byte err error
buf []byte
)
sfr, err = newSparseFileReader(rfr, v.sparseMap, v.realSize) sfr, err = newSparseFileReader(rfr, v.sparseMap, v.realSize)
if err != nil { if err != nil {
@ -668,6 +612,64 @@ func TestSparseFileReader(t *testing.T) {
} }
} }
func TestReadOldGNUSparseMap(t *testing.T) {
const (
t00 = "00000000000\x0000000000000\x00"
t11 = "00000000001\x0000000000001\x00"
t12 = "00000000001\x0000000000002\x00"
t21 = "00000000002\x0000000000001\x00"
)
mkBlk := func(size, sp0, sp1, sp2, sp3, ext string, format int) *block {
var blk block
copy(blk.GNU().RealSize(), size)
copy(blk.GNU().Sparse().Entry(0), sp0)
copy(blk.GNU().Sparse().Entry(1), sp1)
copy(blk.GNU().Sparse().Entry(2), sp2)
copy(blk.GNU().Sparse().Entry(3), sp3)
copy(blk.GNU().Sparse().IsExtended(), ext)
if format != formatUnknown {
blk.SetFormat(format)
}
return &blk
}
vectors := []struct {
data string // Input data
rawHdr *block // Input raw header
want []sparseEntry // Expected sparse entries to be outputted
err error // Expected error to be returned
}{
{"", mkBlk("", "", "", "", "", "", formatUnknown), nil, ErrHeader},
{"", mkBlk("1234", "fewa", "", "", "", "", formatGNU), nil, ErrHeader},
{"", mkBlk("0031", "", "", "", "", "", formatGNU), nil, nil},
{"", mkBlk("1234", t00, t11, "", "", "", formatGNU),
[]sparseEntry{{0, 0}, {1, 1}}, nil},
{"", mkBlk("1234", t11, t12, t21, t11, "", formatGNU),
[]sparseEntry{{1, 1}, {1, 2}, {2, 1}, {1, 1}}, nil},
{"", mkBlk("1234", t11, t12, t21, t11, "\x80", formatGNU),
[]sparseEntry{}, io.ErrUnexpectedEOF},
{t11 + t11,
mkBlk("1234", t11, t12, t21, t11, "\x80", formatGNU),
[]sparseEntry{}, io.ErrUnexpectedEOF},
{t11 + t21 + strings.Repeat("\x00", 512),
mkBlk("1234", t11, t12, t21, t11, "\x80", formatGNU),
[]sparseEntry{{1, 1}, {1, 2}, {2, 1}, {1, 1}, {1, 1}, {2, 1}}, nil},
}
for i, v := range vectors {
tr := Reader{r: strings.NewReader(v.data)}
hdr := new(Header)
got, err := tr.readOldGNUSparseMap(hdr, v.rawHdr)
if !reflect.DeepEqual(got, v.want) && !(len(got) == 0 && len(v.want) == 0) {
t.Errorf("test %d, readOldGNUSparseMap(...): got %v, want %v", i, got, v.want)
}
if err != v.err {
t.Errorf("test %d, unexpected error: got %v, want %v", i, err, v.err)
}
}
}
func TestReadGNUSparseMap0x1(t *testing.T) { func TestReadGNUSparseMap0x1(t *testing.T) {
const ( const (
maxUint = ^uint(0) maxUint = ^uint(0)
@ -679,7 +681,7 @@ func TestReadGNUSparseMap0x1(t *testing.T) {
big3 = fmt.Sprintf("%d", (int64(maxInt) / 3)) big3 = fmt.Sprintf("%d", (int64(maxInt) / 3))
) )
var vectors = []struct { vectors := []struct {
extHdrs map[string]string // Input data extHdrs map[string]string // Input data
sparseMap []sparseEntry // Expected sparse entries to be outputted sparseMap []sparseEntry // Expected sparse entries to be outputted
err error // Expected errors that may be raised err error // Expected errors that may be raised
@ -745,12 +747,12 @@ func TestReadGNUSparseMap0x1(t *testing.T) {
} }
func TestReadGNUSparseMap1x0(t *testing.T) { func TestReadGNUSparseMap1x0(t *testing.T) {
var sp = []sparseEntry{{1, 2}, {3, 4}} sp := []sparseEntry{{1, 2}, {3, 4}}
for i := 0; i < 98; i++ { for i := 0; i < 98; i++ {
sp = append(sp, sparseEntry{54321, 12345}) sp = append(sp, sparseEntry{54321, 12345})
} }
var vectors = []struct { vectors := []struct {
input string // Input data input string // Input data
sparseMap []sparseEntry // Expected sparse entries to be outputted sparseMap []sparseEntry // Expected sparse entries to be outputted
cnt int // Expected number of bytes read cnt int // Expected number of bytes read
@ -825,8 +827,7 @@ func TestReadGNUSparseMap1x0(t *testing.T) {
} }
func TestUninitializedRead(t *testing.T) { func TestUninitializedRead(t *testing.T) {
test := gnuTarTest f, err := os.Open("testdata/gnu.tar")
f, err := os.Open(test.file)
if err != nil { if err != nil {
t.Fatalf("Unexpected error: %v", err) t.Fatalf("Unexpected error: %v", err)
} }
@ -868,7 +869,7 @@ func TestReadTruncation(t *testing.T) {
data2 += strings.Repeat("\x00", 10*512) data2 += strings.Repeat("\x00", 10*512)
trash := strings.Repeat("garbage ", 64) // Exactly 512 bytes trash := strings.Repeat("garbage ", 64) // Exactly 512 bytes
var vectors = []struct { vectors := []struct {
input string // Input stream input string // Input stream
cnt int // Expected number of headers read cnt int // Expected number of headers read
err error // Expected error outcome err error // Expected error outcome
@ -904,8 +905,7 @@ func TestReadTruncation(t *testing.T) {
{pax + trash[:1], 0, io.ErrUnexpectedEOF}, {pax + trash[:1], 0, io.ErrUnexpectedEOF},
{pax + trash[:511], 0, io.ErrUnexpectedEOF}, {pax + trash[:511], 0, io.ErrUnexpectedEOF},
{sparse[:511], 0, io.ErrUnexpectedEOF}, {sparse[:511], 0, io.ErrUnexpectedEOF},
// TODO(dsnet): This should pass, but currently fails. {sparse[:512], 0, io.ErrUnexpectedEOF},
// {sparse[:512], 0, io.ErrUnexpectedEOF},
{sparse[:3584], 1, io.EOF}, {sparse[:3584], 1, io.EOF},
{sparse[:9200], 1, io.EOF}, // Terminate in padding of sparse header {sparse[:9200], 1, io.EOF}, // Terminate in padding of sparse header
{sparse[:9216], 1, io.EOF}, {sparse[:9216], 1, io.EOF},
@ -1002,7 +1002,7 @@ func TestReadHeaderOnly(t *testing.T) {
t.Fatalf("len(hdrs): got %d, want %d", len(hdrs), 16) t.Fatalf("len(hdrs): got %d, want %d", len(hdrs), 16)
} }
for i := 0; i < 8; i++ { for i := 0; i < 8; i++ {
var hdr1, hdr2 = hdrs[i+0], hdrs[i+8] hdr1, hdr2 := hdrs[i+0], hdrs[i+8]
hdr1.Size, hdr2.Size = 0, 0 hdr1.Size, hdr2.Size = 0, 0
if !reflect.DeepEqual(*hdr1, *hdr2) { if !reflect.DeepEqual(*hdr1, *hdr2) {
t.Errorf("incorrect header:\ngot %+v\nwant %+v", *hdr1, *hdr2) t.Errorf("incorrect header:\ngot %+v\nwant %+v", *hdr1, *hdr2)
@ -1010,116 +1010,87 @@ func TestReadHeaderOnly(t *testing.T) {
} }
} }
func TestParsePAXRecord(t *testing.T) { func TestMergePAX(t *testing.T) {
var medName = strings.Repeat("CD", 50) vectors := []struct {
var longName = strings.Repeat("AB", 100) in map[string]string
want *Header
ok bool
}{{
in: map[string]string{
"path": "a/b/c",
"uid": "1000",
"mtime": "1350244992.023960108",
},
want: &Header{
Name: "a/b/c",
Uid: 1000,
ModTime: time.Unix(1350244992, 23960108),
},
ok: true,
}, {
in: map[string]string{
"gid": "gtgergergersagersgers",
},
}, {
in: map[string]string{
"missing": "missing",
"SCHILY.xattr.key": "value",
},
want: &Header{
Xattrs: map[string]string{"key": "value"},
},
ok: true,
}}
var vectors = []struct { for i, v := range vectors {
input string got := new(Header)
residual string err := mergePAX(got, v.in)
outputKey string if v.ok && !reflect.DeepEqual(*got, *v.want) {
outputVal string t.Errorf("test %d, mergePAX(...):\ngot %+v\nwant %+v", i, *got, *v.want)
ok bool
}{
{"6 k=v\n\n", "\n", "k", "v", true},
{"19 path=/etc/hosts\n", "", "path", "/etc/hosts", true},
{"210 path=" + longName + "\nabc", "abc", "path", longName, true},
{"110 path=" + medName + "\n", "", "path", medName, true},
{"9 foo=ba\n", "", "foo", "ba", true},
{"11 foo=bar\n\x00", "\x00", "foo", "bar", true},
{"18 foo=b=\nar=\n==\x00\n", "", "foo", "b=\nar=\n==\x00", true},
{"27 foo=hello9 foo=ba\nworld\n", "", "foo", "hello9 foo=ba\nworld", true},
{"27 ☺☻☹=日a本b語ç\nmeow mix", "meow mix", "☺☻☹", "日a本b語ç", true},
{"17 \x00hello=\x00world\n", "", "\x00hello", "\x00world", true},
{"1 k=1\n", "1 k=1\n", "", "", false},
{"6 k~1\n", "6 k~1\n", "", "", false},
{"6_k=1\n", "6_k=1\n", "", "", false},
{"6 k=1 ", "6 k=1 ", "", "", false},
{"632 k=1\n", "632 k=1\n", "", "", false},
{"16 longkeyname=hahaha\n", "16 longkeyname=hahaha\n", "", "", false},
{"3 somelongkey=\n", "3 somelongkey=\n", "", "", false},
{"50 tooshort=\n", "50 tooshort=\n", "", "", false},
}
for _, v := range vectors {
key, val, res, err := parsePAXRecord(v.input)
ok := (err == nil)
if v.ok != ok {
if v.ok {
t.Errorf("parsePAXRecord(%q): got parsing failure, want success", v.input)
} else {
t.Errorf("parsePAXRecord(%q): got parsing success, want failure", v.input)
}
} }
if ok && (key != v.outputKey || val != v.outputVal) { if ok := err == nil; ok != v.ok {
t.Errorf("parsePAXRecord(%q): got (%q: %q), want (%q: %q)", t.Errorf("test %d, mergePAX(...): got %v, want %v", i, ok, v.ok)
v.input, key, val, v.outputKey, v.outputVal)
}
if res != v.residual {
t.Errorf("parsePAXRecord(%q): got residual %q, want residual %q",
v.input, res, v.residual)
} }
} }
} }
func TestParseNumeric(t *testing.T) { func TestParsePAX(t *testing.T) {
var vectors = []struct { vectors := []struct {
input string in string
output int64 want map[string]string
ok bool ok bool
}{ }{
// Test base-256 (binary) encoded values. {"", nil, true},
{"", 0, true}, {"6 k=1\n", map[string]string{"k": "1"}, true},
{"\x80", 0, true}, {"10 a=name\n", map[string]string{"a": "name"}, true},
{"\x80\x00", 0, true}, {"9 a=name\n", map[string]string{"a": "name"}, true},
{"\x80\x00\x00", 0, true}, {"30 mtime=1350244992.023960108\n", map[string]string{"mtime": "1350244992.023960108"}, true},
{"\xbf", (1 << 6) - 1, true}, {"3 somelongkey=\n", nil, false},
{"\xbf\xff", (1 << 14) - 1, true}, {"50 tooshort=\n", nil, false},
{"\xbf\xff\xff", (1 << 22) - 1, true}, {"13 key1=haha\n13 key2=nana\n13 key3=kaka\n",
{"\xff", -1, true}, map[string]string{"key1": "haha", "key2": "nana", "key3": "kaka"}, true},
{"\xff\xff", -1, true}, {"13 key1=val1\n13 key2=val2\n8 key1=\n",
{"\xff\xff\xff", -1, true}, map[string]string{"key2": "val2"}, true},
{"\xc0", -1 * (1 << 6), true}, {"22 GNU.sparse.size=10\n26 GNU.sparse.numblocks=2\n" +
{"\xc0\x00", -1 * (1 << 14), true}, "23 GNU.sparse.offset=1\n25 GNU.sparse.numbytes=2\n" +
{"\xc0\x00\x00", -1 * (1 << 22), true}, "23 GNU.sparse.offset=3\n25 GNU.sparse.numbytes=4\n",
{"\x87\x76\xa2\x22\xeb\x8a\x72\x61", 537795476381659745, true}, map[string]string{paxGNUSparseSize: "10", paxGNUSparseNumBlocks: "2", paxGNUSparseMap: "1,2,3,4"}, true},
{"\x80\x00\x00\x00\x07\x76\xa2\x22\xeb\x8a\x72\x61", 537795476381659745, true}, {"22 GNU.sparse.size=10\n26 GNU.sparse.numblocks=1\n" +
{"\xf7\x76\xa2\x22\xeb\x8a\x72\x61", -615126028225187231, true}, "25 GNU.sparse.numbytes=2\n23 GNU.sparse.offset=1\n",
{"\xff\xff\xff\xff\xf7\x76\xa2\x22\xeb\x8a\x72\x61", -615126028225187231, true}, nil, false},
{"\x80\x7f\xff\xff\xff\xff\xff\xff\xff", math.MaxInt64, true}, {"22 GNU.sparse.size=10\n26 GNU.sparse.numblocks=1\n" +
{"\x80\x80\x00\x00\x00\x00\x00\x00\x00", 0, false}, "25 GNU.sparse.offset=1,2\n25 GNU.sparse.numbytes=2\n",
{"\xff\x80\x00\x00\x00\x00\x00\x00\x00", math.MinInt64, true}, nil, false},
{"\xff\x7f\xff\xff\xff\xff\xff\xff\xff", 0, false},
{"\xf5\xec\xd1\xc7\x7e\x5f\x26\x48\x81\x9f\x8f\x9b", 0, false},
// Test base-8 (octal) encoded values.
{"0000000\x00", 0, true},
{" \x0000000\x00", 0, true},
{" \x0000003\x00", 3, true},
{"00000000227\x00", 0227, true},
{"032033\x00 ", 032033, true},
{"320330\x00 ", 0320330, true},
{"0000660\x00 ", 0660, true},
{"\x00 0000660\x00 ", 0660, true},
{"0123456789abcdef", 0, false},
{"0123456789\x00abcdef", 0, false},
{"01234567\x0089abcdef", 342391, true},
{"0123\x7e\x5f\x264123", 0, false},
} }
for _, v := range vectors { for i, v := range vectors {
var p parser r := strings.NewReader(v.in)
num := p.parseNumeric([]byte(v.input)) got, err := parsePAX(r)
ok := (p.err == nil) if !reflect.DeepEqual(got, v.want) && !(len(got) == 0 && len(v.want) == 0) {
if v.ok != ok { t.Errorf("test %d, parsePAX(...):\ngot %v\nwant %v", i, got, v.want)
if v.ok {
t.Errorf("parseNumeric(%q): got parsing failure, want success", v.input)
} else {
t.Errorf("parseNumeric(%q): got parsing success, want failure", v.input)
}
} }
if ok && num != v.output { if ok := err == nil; ok != v.ok {
t.Errorf("parseNumeric(%q): got %d, want %d", v.input, num, v.output) t.Errorf("test %d, parsePAX(...): got %v, want %v", i, ok, v.ok)
} }
} }
} }

View file

@ -0,0 +1,252 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tar
import (
"bytes"
"fmt"
"strconv"
"strings"
"time"
)
func isASCII(s string) bool {
for _, c := range s {
if c >= 0x80 {
return false
}
}
return true
}
func toASCII(s string) string {
if isASCII(s) {
return s
}
var buf bytes.Buffer
for _, c := range s {
if c < 0x80 {
buf.WriteByte(byte(c))
}
}
return buf.String()
}
type parser struct {
err error // Last error seen
}
type formatter struct {
err error // Last error seen
}
// parseString parses bytes as a NUL-terminated C-style string.
// If a NUL byte is not found then the whole slice is returned as a string.
func (*parser) parseString(b []byte) string {
n := 0
for n < len(b) && b[n] != 0 {
n++
}
return string(b[0:n])
}
// Write s into b, terminating it with a NUL if there is room.
func (f *formatter) formatString(b []byte, s string) {
if len(s) > len(b) {
f.err = ErrFieldTooLong
return
}
ascii := toASCII(s)
copy(b, ascii)
if len(ascii) < len(b) {
b[len(ascii)] = 0
}
}
// fitsInBase256 reports whether x can be encoded into n bytes using base-256
// encoding. Unlike octal encoding, base-256 encoding does not require that the
// string ends with a NUL character. Thus, all n bytes are available for output.
//
// If operating in binary mode, this assumes strict GNU binary mode; which means
// that the first byte can only be either 0x80 or 0xff. Thus, the first byte is
// equivalent to the sign bit in two's complement form.
func fitsInBase256(n int, x int64) bool {
var binBits = uint(n-1) * 8
return n >= 9 || (x >= -1<<binBits && x < 1<<binBits)
}
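The bound above can be checked in isolation; this standalone sketch copies fitsInBase256 verbatim so it runs on its own and reproduces a few vectors from the strconv_test.go additions further down:

package main

import "fmt"

// fitsInBase256 is copied from strconv.go above so the sketch is self-contained.
func fitsInBase256(n int, x int64) bool {
	var binBits = uint(n-1) * 8
	return n >= 9 || (x >= -1<<binBits && x < 1<<binBits)
}

func main() {
	fmt.Println(fitsInBase256(8, 1<<56-1)) // true: 7 data bytes plus the marker byte suffice
	fmt.Println(fitsInBase256(8, 1<<56))   // false: needs a wider field
	fmt.Println(fitsInBase256(12, -1<<60)) // true: any int64 fits once n >= 9
}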
// parseNumeric parses the input as being encoded in either base-256 or octal.
// This function may return negative numbers.
// If parsing fails or an integer overflow occurs, err will be set.
func (p *parser) parseNumeric(b []byte) int64 {
// Check for base-256 (binary) format first.
// If the first bit is set, then all following bits constitute a two's
// complement encoded number in big-endian byte order.
if len(b) > 0 && b[0]&0x80 != 0 {
// Handling negative numbers relies on the following identity:
// -a-1 == ^a
//
// If the number is negative, we use an inversion mask to invert the
// data bytes and treat the value as an unsigned number.
var inv byte // 0x00 if positive or zero, 0xff if negative
if b[0]&0x40 != 0 {
inv = 0xff
}
var x uint64
for i, c := range b {
c ^= inv // Inverts c only if inv is 0xff, otherwise does nothing
if i == 0 {
c &= 0x7f // Ignore signal bit in first byte
}
if (x >> 56) > 0 {
p.err = ErrHeader // Integer overflow
return 0
}
x = x<<8 | uint64(c)
}
if (x >> 63) > 0 {
p.err = ErrHeader // Integer overflow
return 0
}
if inv == 0xff {
return ^int64(x)
}
return int64(x)
}
// Normal case is base-8 (octal) format.
return p.parseOctal(b)
}
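A minimal sketch of the base-256 branch above with the overflow guards omitted; decodeBase256 is a made-up name for illustration, not part of the package:

package main

import "fmt"

// decodeBase256 mirrors the inversion trick used by parseNumeric: a set 0x40
// bit in the first byte marks a negative value, recovered via -a-1 == ^a.
func decodeBase256(b []byte) int64 {
	var inv byte // 0x00 if positive or zero, 0xff if negative
	if b[0]&0x40 != 0 {
		inv = 0xff
	}
	var x uint64
	for i, c := range b {
		c ^= inv // inverts c only when inv is 0xff
		if i == 0 {
			c &= 0x7f // drop the binary-format marker bit
		}
		x = x<<8 | uint64(c) // the real parser also checks for overflow here
	}
	if inv == 0xff {
		return ^int64(x)
	}
	return int64(x)
}

func main() {
	fmt.Println(decodeBase256([]byte("\xff\xff")))                         // -1
	fmt.Println(decodeBase256([]byte("\x87\x76\xa2\x22\xeb\x8a\x72\x61"))) // 537795476381659745
}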
// Write x into b, as binary (GNUtar/star extension).
func (f *formatter) formatNumeric(b []byte, x int64) {
if fitsInBase256(len(b), x) {
for i := len(b) - 1; i >= 0; i-- {
b[i] = byte(x)
x >>= 8
}
b[0] |= 0x80 // Highest bit indicates binary format
return
}
f.formatOctal(b, 0) // Last resort, just write zero
f.err = ErrFieldTooLong
}
func (p *parser) parseOctal(b []byte) int64 {
// Because unused fields are filled with NULs, we need
// to skip leading NULs. Fields may also be padded with
// spaces or NULs.
// So we remove leading and trailing NULs and spaces to
// be sure.
b = bytes.Trim(b, " \x00")
if len(b) == 0 {
return 0
}
x, perr := strconv.ParseUint(p.parseString(b), 8, 64)
if perr != nil {
p.err = ErrHeader
}
return int64(x)
}
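As a rough illustration of the padding rules described above, this standalone sketch (parseOctalField is a hypothetical name) trims NULs and spaces before handing the field to strconv.ParseUint:

package main

import (
	"bytes"
	"fmt"
	"strconv"
)

func parseOctalField(b []byte) (int64, error) {
	b = bytes.Trim(b, " \x00") // header fields are padded with NULs and/or spaces
	if len(b) == 0 {
		return 0, nil
	}
	x, err := strconv.ParseUint(string(b), 8, 64)
	return int64(x), err
}

func main() {
	fmt.Println(parseOctalField([]byte("0000660\x00 ")))      // 432 <nil> (0660 octal)
	fmt.Println(parseOctalField([]byte("\x00 0000660\x00 "))) // 432 <nil>
}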
func (f *formatter) formatOctal(b []byte, x int64) {
s := strconv.FormatInt(x, 8)
// Add leading zeros, but leave room for a NUL.
if n := len(b) - len(s) - 1; n > 0 {
s = strings.Repeat("0", n) + s
}
f.formatString(b, s)
}
// parsePAXTime takes a string of the form %d.%d as described in the PAX
// specification. Note that this implementation allows for negative timestamps,
// which is allowed for by the PAX specification, but not always portable.
func parsePAXTime(s string) (time.Time, error) {
const maxNanoSecondDigits = 9
// Split string into seconds and sub-seconds parts.
ss, sn := s, ""
if pos := strings.IndexByte(s, '.'); pos >= 0 {
ss, sn = s[:pos], s[pos+1:]
}
// Parse the seconds.
secs, err := strconv.ParseInt(ss, 10, 64)
if err != nil {
return time.Time{}, ErrHeader
}
if len(sn) == 0 {
return time.Unix(secs, 0), nil // No sub-second values
}
// Parse the nanoseconds.
if strings.Trim(sn, "0123456789") != "" {
return time.Time{}, ErrHeader
}
if len(sn) < maxNanoSecondDigits {
sn += strings.Repeat("0", maxNanoSecondDigits-len(sn)) // Right pad
} else {
sn = sn[:maxNanoSecondDigits] // Right truncate
}
nsecs, _ := strconv.ParseInt(sn, 10, 64) // Must succeed
if len(ss) > 0 && ss[0] == '-' {
return time.Unix(secs, -1*int64(nsecs)), nil // Negative correction
}
return time.Unix(secs, int64(nsecs)), nil
}
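A small worked example of the %d.%d split and right-padding described above, using a vector that also appears in the strconv_test.go additions below; validation and truncation are left out:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	s := "1350244992.02396010" // only eight sub-second digits
	ss, sn := s, ""
	if pos := strings.IndexByte(s, '.'); pos >= 0 {
		ss, sn = s[:pos], s[pos+1:]
	}
	secs, _ := strconv.ParseInt(ss, 10, 64)
	sn += strings.Repeat("0", 9-len(sn)) // right-pad to nanosecond precision
	nsecs, _ := strconv.ParseInt(sn, 10, 64)
	fmt.Println(secs, nsecs) // 1350244992 23960100
}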
// TODO(dsnet): Implement formatPAXTime.
// parsePAXRecord parses the input PAX record string into a key-value pair.
// If parsing is successful, it will slice off the currently read record and
// return the remainder as r.
//
// A PAX record is of the following form:
// "%d %s=%s\n" % (size, key, value)
func parsePAXRecord(s string) (k, v, r string, err error) {
// The size field ends at the first space.
sp := strings.IndexByte(s, ' ')
if sp == -1 {
return "", "", s, ErrHeader
}
// Parse the first token as a decimal integer.
n, perr := strconv.ParseInt(s[:sp], 10, 0) // Intentionally parse as native int
if perr != nil || n < 5 || int64(len(s)) < n {
return "", "", s, ErrHeader
}
// Extract everything between the space and the final newline.
rec, nl, rem := s[sp+1:n-1], s[n-1:n], s[n:]
if nl != "\n" {
return "", "", s, ErrHeader
}
// The first equals separates the key from the value.
eq := strings.IndexByte(rec, '=')
if eq == -1 {
return "", "", s, ErrHeader
}
return rec[:eq], rec[eq+1:], rem, nil
}
// formatPAXRecord formats a single PAX record, prefixing it with the
// appropriate length.
func formatPAXRecord(k, v string) string {
const padding = 3 // Extra padding for ' ', '=', and '\n'
size := len(k) + len(v) + padding
size += len(strconv.Itoa(size))
record := fmt.Sprintf("%d %s=%s\n", size, k, v)
// Final adjustment if adding size field increased the record size.
if len(record) != size {
size = len(record)
record = fmt.Sprintf("%d %s=%s\n", size, k, v)
}
return record
}
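A standalone sketch of the self-referential length computation above: the leading size field counts its own digits, so the record may need to be formatted a second time once the size is known:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	k, v := "path", "/etc/hosts"
	size := len(k) + len(v) + 3 // ' ', '=', and '\n'
	size += len(strconv.Itoa(size))
	record := fmt.Sprintf("%d %s=%s\n", size, k, v)
	if len(record) != size { // adding the size field may itself grow the record
		size = len(record)
		record = fmt.Sprintf("%d %s=%s\n", size, k, v)
	}
	fmt.Printf("%q\n", record) // "19 path=/etc/hosts\n", matching the strconv_test.go vectors below
}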

View file

@ -0,0 +1,319 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tar
import (
"math"
"strings"
"testing"
"time"
)
func TestFitsInBase256(t *testing.T) {
vectors := []struct {
in int64
width int
ok bool
}{
{+1, 8, true},
{0, 8, true},
{-1, 8, true},
{1 << 56, 8, false},
{(1 << 56) - 1, 8, true},
{-1 << 56, 8, true},
{(-1 << 56) - 1, 8, false},
{121654, 8, true},
{-9849849, 8, true},
{math.MaxInt64, 9, true},
{0, 9, true},
{math.MinInt64, 9, true},
{math.MaxInt64, 12, true},
{0, 12, true},
{math.MinInt64, 12, true},
}
for _, v := range vectors {
ok := fitsInBase256(v.width, v.in)
if ok != v.ok {
t.Errorf("fitsInBase256(%d, %d): got %v, want %v", v.in, v.width, ok, v.ok)
}
}
}
func TestParseNumeric(t *testing.T) {
vectors := []struct {
in string
want int64
ok bool
}{
// Test base-256 (binary) encoded values.
{"", 0, true},
{"\x80", 0, true},
{"\x80\x00", 0, true},
{"\x80\x00\x00", 0, true},
{"\xbf", (1 << 6) - 1, true},
{"\xbf\xff", (1 << 14) - 1, true},
{"\xbf\xff\xff", (1 << 22) - 1, true},
{"\xff", -1, true},
{"\xff\xff", -1, true},
{"\xff\xff\xff", -1, true},
{"\xc0", -1 * (1 << 6), true},
{"\xc0\x00", -1 * (1 << 14), true},
{"\xc0\x00\x00", -1 * (1 << 22), true},
{"\x87\x76\xa2\x22\xeb\x8a\x72\x61", 537795476381659745, true},
{"\x80\x00\x00\x00\x07\x76\xa2\x22\xeb\x8a\x72\x61", 537795476381659745, true},
{"\xf7\x76\xa2\x22\xeb\x8a\x72\x61", -615126028225187231, true},
{"\xff\xff\xff\xff\xf7\x76\xa2\x22\xeb\x8a\x72\x61", -615126028225187231, true},
{"\x80\x7f\xff\xff\xff\xff\xff\xff\xff", math.MaxInt64, true},
{"\x80\x80\x00\x00\x00\x00\x00\x00\x00", 0, false},
{"\xff\x80\x00\x00\x00\x00\x00\x00\x00", math.MinInt64, true},
{"\xff\x7f\xff\xff\xff\xff\xff\xff\xff", 0, false},
{"\xf5\xec\xd1\xc7\x7e\x5f\x26\x48\x81\x9f\x8f\x9b", 0, false},
// Test base-8 (octal) encoded values.
{"0000000\x00", 0, true},
{" \x0000000\x00", 0, true},
{" \x0000003\x00", 3, true},
{"00000000227\x00", 0227, true},
{"032033\x00 ", 032033, true},
{"320330\x00 ", 0320330, true},
{"0000660\x00 ", 0660, true},
{"\x00 0000660\x00 ", 0660, true},
{"0123456789abcdef", 0, false},
{"0123456789\x00abcdef", 0, false},
{"01234567\x0089abcdef", 342391, true},
{"0123\x7e\x5f\x264123", 0, false},
}
for _, v := range vectors {
var p parser
got := p.parseNumeric([]byte(v.in))
ok := (p.err == nil)
if ok != v.ok {
if v.ok {
t.Errorf("parseNumeric(%q): got parsing failure, want success", v.in)
} else {
t.Errorf("parseNumeric(%q): got parsing success, want failure", v.in)
}
}
if ok && got != v.want {
t.Errorf("parseNumeric(%q): got %d, want %d", v.in, got, v.want)
}
}
}
func TestFormatNumeric(t *testing.T) {
vectors := []struct {
in int64
want string
ok bool
}{
// Test base-256 (binary) encoded values.
{-1, "\xff", true},
{-1, "\xff\xff", true},
{-1, "\xff\xff\xff", true},
{(1 << 0), "0", false},
{(1 << 8) - 1, "\x80\xff", true},
{(1 << 8), "0\x00", false},
{(1 << 16) - 1, "\x80\xff\xff", true},
{(1 << 16), "00\x00", false},
{-1 * (1 << 0), "\xff", true},
{-1*(1<<0) - 1, "0", false},
{-1 * (1 << 8), "\xff\x00", true},
{-1*(1<<8) - 1, "0\x00", false},
{-1 * (1 << 16), "\xff\x00\x00", true},
{-1*(1<<16) - 1, "00\x00", false},
{537795476381659745, "0000000\x00", false},
{537795476381659745, "\x80\x00\x00\x00\x07\x76\xa2\x22\xeb\x8a\x72\x61", true},
{-615126028225187231, "0000000\x00", false},
{-615126028225187231, "\xff\xff\xff\xff\xf7\x76\xa2\x22\xeb\x8a\x72\x61", true},
{math.MaxInt64, "0000000\x00", false},
{math.MaxInt64, "\x80\x00\x00\x00\x7f\xff\xff\xff\xff\xff\xff\xff", true},
{math.MinInt64, "0000000\x00", false},
{math.MinInt64, "\xff\xff\xff\xff\x80\x00\x00\x00\x00\x00\x00\x00", true},
{math.MaxInt64, "\x80\x7f\xff\xff\xff\xff\xff\xff\xff", true},
{math.MinInt64, "\xff\x80\x00\x00\x00\x00\x00\x00\x00", true},
}
for _, v := range vectors {
var f formatter
got := make([]byte, len(v.want))
f.formatNumeric(got, v.in)
ok := (f.err == nil)
if ok != v.ok {
if v.ok {
t.Errorf("formatNumeric(%d): got formatting failure, want success", v.in)
} else {
t.Errorf("formatNumeric(%d): got formatting success, want failure", v.in)
}
}
if string(got) != v.want {
t.Errorf("formatNumeric(%d): got %q, want %q", v.in, got, v.want)
}
}
}
func TestParsePAXTime(t *testing.T) {
vectors := []struct {
in string
want time.Time
ok bool
}{
{"1350244992.023960108", time.Unix(1350244992, 23960108), true},
{"1350244992.02396010", time.Unix(1350244992, 23960100), true},
{"1350244992.0239601089", time.Unix(1350244992, 23960108), true},
{"1350244992.3", time.Unix(1350244992, 300000000), true},
{"1350244992", time.Unix(1350244992, 0), true},
{"-1.000000001", time.Unix(-1, -1e0+0e0), true},
{"-1.000001", time.Unix(-1, -1e3+0e0), true},
{"-1.001000", time.Unix(-1, -1e6+0e0), true},
{"-1", time.Unix(-1, -0e0+0e0), true},
{"-1.999000", time.Unix(-1, -1e9+1e6), true},
{"-1.999999", time.Unix(-1, -1e9+1e3), true},
{"-1.999999999", time.Unix(-1, -1e9+1e0), true},
{"0.000000001", time.Unix(0, 1e0+0e0), true},
{"0.000001", time.Unix(0, 1e3+0e0), true},
{"0.001000", time.Unix(0, 1e6+0e0), true},
{"0", time.Unix(0, 0e0), true},
{"0.999000", time.Unix(0, 1e9-1e6), true},
{"0.999999", time.Unix(0, 1e9-1e3), true},
{"0.999999999", time.Unix(0, 1e9-1e0), true},
{"1.000000001", time.Unix(+1, +1e0-0e0), true},
{"1.000001", time.Unix(+1, +1e3-0e0), true},
{"1.001000", time.Unix(+1, +1e6-0e0), true},
{"1", time.Unix(+1, +0e0-0e0), true},
{"1.999000", time.Unix(+1, +1e9-1e6), true},
{"1.999999", time.Unix(+1, +1e9-1e3), true},
{"1.999999999", time.Unix(+1, +1e9-1e0), true},
{"-1350244992.023960108", time.Unix(-1350244992, -23960108), true},
{"-1350244992.02396010", time.Unix(-1350244992, -23960100), true},
{"-1350244992.0239601089", time.Unix(-1350244992, -23960108), true},
{"-1350244992.3", time.Unix(-1350244992, -300000000), true},
{"-1350244992", time.Unix(-1350244992, 0), true},
{"", time.Time{}, false},
{"0", time.Unix(0, 0), true},
{"1.", time.Unix(1, 0), true},
{"0.0", time.Unix(0, 0), true},
{".5", time.Time{}, false},
{"-1.3", time.Unix(-1, -3e8), true},
{"-1.0", time.Unix(-1, -0e0), true},
{"-0.0", time.Unix(-0, -0e0), true},
{"-0.1", time.Unix(-0, -1e8), true},
{"-0.01", time.Unix(-0, -1e7), true},
{"-0.99", time.Unix(-0, -99e7), true},
{"-0.98", time.Unix(-0, -98e7), true},
{"-1.1", time.Unix(-1, -1e8), true},
{"-1.01", time.Unix(-1, -1e7), true},
{"-2.99", time.Unix(-2, -99e7), true},
{"-5.98", time.Unix(-5, -98e7), true},
{"-", time.Time{}, false},
{"+", time.Time{}, false},
{"-1.-1", time.Time{}, false},
{"99999999999999999999999999999999999999999999999", time.Time{}, false},
{"0.123456789abcdef", time.Time{}, false},
{"foo", time.Time{}, false},
{"\x00", time.Time{}, false},
{"𝟵𝟴𝟳𝟲𝟱.𝟰𝟯𝟮𝟭𝟬", time.Time{}, false}, // Unicode numbers (U+1D7EC to U+1D7F5)
{"98765﹒43210", time.Time{}, false}, // Unicode period (U+FE52)
}
for _, v := range vectors {
ts, err := parsePAXTime(v.in)
ok := (err == nil)
if v.ok != ok {
if v.ok {
t.Errorf("parsePAXTime(%q): got parsing failure, want success", v.in)
} else {
t.Errorf("parsePAXTime(%q): got parsing success, want failure", v.in)
}
}
if ok && !ts.Equal(v.want) {
t.Errorf("parsePAXTime(%q): got (%ds %dns), want (%ds %dns)",
v.in, ts.Unix(), ts.Nanosecond(), v.want.Unix(), v.want.Nanosecond())
}
}
}
func TestParsePAXRecord(t *testing.T) {
medName := strings.Repeat("CD", 50)
longName := strings.Repeat("AB", 100)
vectors := []struct {
in string
wantRes string
wantKey string
wantVal string
ok bool
}{
{"6 k=v\n\n", "\n", "k", "v", true},
{"19 path=/etc/hosts\n", "", "path", "/etc/hosts", true},
{"210 path=" + longName + "\nabc", "abc", "path", longName, true},
{"110 path=" + medName + "\n", "", "path", medName, true},
{"9 foo=ba\n", "", "foo", "ba", true},
{"11 foo=bar\n\x00", "\x00", "foo", "bar", true},
{"18 foo=b=\nar=\n==\x00\n", "", "foo", "b=\nar=\n==\x00", true},
{"27 foo=hello9 foo=ba\nworld\n", "", "foo", "hello9 foo=ba\nworld", true},
{"27 ☺☻☹=日a本b語ç\nmeow mix", "meow mix", "☺☻☹", "日a本b語ç", true},
{"17 \x00hello=\x00world\n", "", "\x00hello", "\x00world", true},
{"1 k=1\n", "1 k=1\n", "", "", false},
{"6 k~1\n", "6 k~1\n", "", "", false},
{"6_k=1\n", "6_k=1\n", "", "", false},
{"6 k=1 ", "6 k=1 ", "", "", false},
{"632 k=1\n", "632 k=1\n", "", "", false},
{"16 longkeyname=hahaha\n", "16 longkeyname=hahaha\n", "", "", false},
{"3 somelongkey=\n", "3 somelongkey=\n", "", "", false},
{"50 tooshort=\n", "50 tooshort=\n", "", "", false},
}
for _, v := range vectors {
key, val, res, err := parsePAXRecord(v.in)
ok := (err == nil)
if ok != v.ok {
if v.ok {
t.Errorf("parsePAXRecord(%q): got parsing failure, want success", v.in)
} else {
t.Errorf("parsePAXRecord(%q): got parsing success, want failure", v.in)
}
}
if v.ok && (key != v.wantKey || val != v.wantVal) {
t.Errorf("parsePAXRecord(%q): got (%q: %q), want (%q: %q)",
v.in, key, val, v.wantKey, v.wantVal)
}
if res != v.wantRes {
t.Errorf("parsePAXRecord(%q): got residual %q, want residual %q",
v.in, res, v.wantRes)
}
}
}
func TestFormatPAXRecord(t *testing.T) {
medName := strings.Repeat("CD", 50)
longName := strings.Repeat("AB", 100)
vectors := []struct {
inKey string
inVal string
want string
}{
{"k", "v", "6 k=v\n"},
{"path", "/etc/hosts", "19 path=/etc/hosts\n"},
{"path", longName, "210 path=" + longName + "\n"},
{"path", medName, "110 path=" + medName + "\n"},
{"foo", "ba", "9 foo=ba\n"},
{"foo", "bar", "11 foo=bar\n"},
{"foo", "b=\nar=\n==\x00", "18 foo=b=\nar=\n==\x00\n"},
{"foo", "hello9 foo=ba\nworld", "27 foo=hello9 foo=ba\nworld\n"},
{"☺☻☹", "日a本b語ç", "27 ☺☻☹=日a本b語ç\n"},
{"\x00hello", "\x00world", "17 \x00hello=\x00world\n"},
}
for _, v := range vectors {
got := formatPAXRecord(v.inKey, v.inVal)
if got != v.want {
t.Errorf("formatPAXRecord(%q, %q): got %q, want %q",
v.inKey, v.inVal, got, v.want)
}
}
}

View file

@ -135,190 +135,178 @@ type headerRoundTripTest struct {
} }
func TestHeaderRoundTrip(t *testing.T) { func TestHeaderRoundTrip(t *testing.T) {
golden := []headerRoundTripTest{ vectors := []headerRoundTripTest{{
// regular file. // regular file.
{ h: &Header{
h: &Header{ Name: "test.txt",
Name: "test.txt", Mode: 0644 | c_ISREG,
Mode: 0644 | c_ISREG, Size: 12,
Size: 12, ModTime: time.Unix(1360600916, 0),
ModTime: time.Unix(1360600916, 0), Typeflag: TypeReg,
Typeflag: TypeReg,
},
fm: 0644,
}, },
fm: 0644,
}, {
// symbolic link. // symbolic link.
{ h: &Header{
h: &Header{ Name: "link.txt",
Name: "link.txt", Mode: 0777 | c_ISLNK,
Mode: 0777 | c_ISLNK, Size: 0,
Size: 0, ModTime: time.Unix(1360600852, 0),
ModTime: time.Unix(1360600852, 0), Typeflag: TypeSymlink,
Typeflag: TypeSymlink,
},
fm: 0777 | os.ModeSymlink,
}, },
fm: 0777 | os.ModeSymlink,
}, {
// character device node. // character device node.
{ h: &Header{
h: &Header{ Name: "dev/null",
Name: "dev/null", Mode: 0666 | c_ISCHR,
Mode: 0666 | c_ISCHR, Size: 0,
Size: 0, ModTime: time.Unix(1360578951, 0),
ModTime: time.Unix(1360578951, 0), Typeflag: TypeChar,
Typeflag: TypeChar,
},
fm: 0666 | os.ModeDevice | os.ModeCharDevice,
}, },
fm: 0666 | os.ModeDevice | os.ModeCharDevice,
}, {
// block device node. // block device node.
{ h: &Header{
h: &Header{ Name: "dev/sda",
Name: "dev/sda", Mode: 0660 | c_ISBLK,
Mode: 0660 | c_ISBLK, Size: 0,
Size: 0, ModTime: time.Unix(1360578954, 0),
ModTime: time.Unix(1360578954, 0), Typeflag: TypeBlock,
Typeflag: TypeBlock,
},
fm: 0660 | os.ModeDevice,
}, },
fm: 0660 | os.ModeDevice,
}, {
// directory. // directory.
{ h: &Header{
h: &Header{ Name: "dir/",
Name: "dir/", Mode: 0755 | c_ISDIR,
Mode: 0755 | c_ISDIR, Size: 0,
Size: 0, ModTime: time.Unix(1360601116, 0),
ModTime: time.Unix(1360601116, 0), Typeflag: TypeDir,
Typeflag: TypeDir,
},
fm: 0755 | os.ModeDir,
}, },
fm: 0755 | os.ModeDir,
}, {
// fifo node. // fifo node.
{ h: &Header{
h: &Header{ Name: "dev/initctl",
Name: "dev/initctl", Mode: 0600 | c_ISFIFO,
Mode: 0600 | c_ISFIFO, Size: 0,
Size: 0, ModTime: time.Unix(1360578949, 0),
ModTime: time.Unix(1360578949, 0), Typeflag: TypeFifo,
Typeflag: TypeFifo,
},
fm: 0600 | os.ModeNamedPipe,
}, },
fm: 0600 | os.ModeNamedPipe,
}, {
// setuid. // setuid.
{ h: &Header{
h: &Header{ Name: "bin/su",
Name: "bin/su", Mode: 0755 | c_ISREG | c_ISUID,
Mode: 0755 | c_ISREG | c_ISUID, Size: 23232,
Size: 23232, ModTime: time.Unix(1355405093, 0),
ModTime: time.Unix(1355405093, 0), Typeflag: TypeReg,
Typeflag: TypeReg,
},
fm: 0755 | os.ModeSetuid,
}, },
fm: 0755 | os.ModeSetuid,
}, {
// setguid. // setguid.
{ h: &Header{
h: &Header{ Name: "group.txt",
Name: "group.txt", Mode: 0750 | c_ISREG | c_ISGID,
Mode: 0750 | c_ISREG | c_ISGID, Size: 0,
Size: 0, ModTime: time.Unix(1360602346, 0),
ModTime: time.Unix(1360602346, 0), Typeflag: TypeReg,
Typeflag: TypeReg,
},
fm: 0750 | os.ModeSetgid,
}, },
fm: 0750 | os.ModeSetgid,
}, {
// sticky. // sticky.
{ h: &Header{
h: &Header{ Name: "sticky.txt",
Name: "sticky.txt", Mode: 0600 | c_ISREG | c_ISVTX,
Mode: 0600 | c_ISREG | c_ISVTX, Size: 7,
Size: 7, ModTime: time.Unix(1360602540, 0),
ModTime: time.Unix(1360602540, 0), Typeflag: TypeReg,
Typeflag: TypeReg,
},
fm: 0600 | os.ModeSticky,
}, },
fm: 0600 | os.ModeSticky,
}, {
// hard link. // hard link.
{ h: &Header{
h: &Header{ Name: "hard.txt",
Name: "hard.txt", Mode: 0644 | c_ISREG,
Mode: 0644 | c_ISREG, Size: 0,
Size: 0, Linkname: "file.txt",
Linkname: "file.txt", ModTime: time.Unix(1360600916, 0),
ModTime: time.Unix(1360600916, 0), Typeflag: TypeLink,
Typeflag: TypeLink,
},
fm: 0644,
}, },
fm: 0644,
}, {
// More information. // More information.
{ h: &Header{
h: &Header{ Name: "info.txt",
Name: "info.txt", Mode: 0600 | c_ISREG,
Mode: 0600 | c_ISREG, Size: 0,
Size: 0, Uid: 1000,
Uid: 1000, Gid: 1000,
Gid: 1000, ModTime: time.Unix(1360602540, 0),
ModTime: time.Unix(1360602540, 0), Uname: "slartibartfast",
Uname: "slartibartfast", Gname: "users",
Gname: "users", Typeflag: TypeReg,
Typeflag: TypeReg,
},
fm: 0600,
}, },
} fm: 0600,
}}
for i, g := range golden { for i, v := range vectors {
fi := g.h.FileInfo() fi := v.h.FileInfo()
h2, err := FileInfoHeader(fi, "") h2, err := FileInfoHeader(fi, "")
if err != nil { if err != nil {
t.Error(err) t.Error(err)
continue continue
} }
if strings.Contains(fi.Name(), "/") { if strings.Contains(fi.Name(), "/") {
t.Errorf("FileInfo of %q contains slash: %q", g.h.Name, fi.Name()) t.Errorf("FileInfo of %q contains slash: %q", v.h.Name, fi.Name())
} }
name := path.Base(g.h.Name) name := path.Base(v.h.Name)
if fi.IsDir() { if fi.IsDir() {
name += "/" name += "/"
} }
if got, want := h2.Name, name; got != want { if got, want := h2.Name, name; got != want {
t.Errorf("i=%d: Name: got %v, want %v", i, got, want) t.Errorf("i=%d: Name: got %v, want %v", i, got, want)
} }
if got, want := h2.Size, g.h.Size; got != want { if got, want := h2.Size, v.h.Size; got != want {
t.Errorf("i=%d: Size: got %v, want %v", i, got, want) t.Errorf("i=%d: Size: got %v, want %v", i, got, want)
} }
if got, want := h2.Uid, g.h.Uid; got != want { if got, want := h2.Uid, v.h.Uid; got != want {
t.Errorf("i=%d: Uid: got %d, want %d", i, got, want) t.Errorf("i=%d: Uid: got %d, want %d", i, got, want)
} }
if got, want := h2.Gid, g.h.Gid; got != want { if got, want := h2.Gid, v.h.Gid; got != want {
t.Errorf("i=%d: Gid: got %d, want %d", i, got, want) t.Errorf("i=%d: Gid: got %d, want %d", i, got, want)
} }
if got, want := h2.Uname, g.h.Uname; got != want { if got, want := h2.Uname, v.h.Uname; got != want {
t.Errorf("i=%d: Uname: got %q, want %q", i, got, want) t.Errorf("i=%d: Uname: got %q, want %q", i, got, want)
} }
if got, want := h2.Gname, g.h.Gname; got != want { if got, want := h2.Gname, v.h.Gname; got != want {
t.Errorf("i=%d: Gname: got %q, want %q", i, got, want) t.Errorf("i=%d: Gname: got %q, want %q", i, got, want)
} }
if got, want := h2.Linkname, g.h.Linkname; got != want { if got, want := h2.Linkname, v.h.Linkname; got != want {
t.Errorf("i=%d: Linkname: got %v, want %v", i, got, want) t.Errorf("i=%d: Linkname: got %v, want %v", i, got, want)
} }
if got, want := h2.Typeflag, g.h.Typeflag; got != want { if got, want := h2.Typeflag, v.h.Typeflag; got != want {
t.Logf("%#v %#v", g.h, fi.Sys()) t.Logf("%#v %#v", v.h, fi.Sys())
t.Errorf("i=%d: Typeflag: got %q, want %q", i, got, want) t.Errorf("i=%d: Typeflag: got %q, want %q", i, got, want)
} }
if got, want := h2.Mode, g.h.Mode; got != want { if got, want := h2.Mode, v.h.Mode; got != want {
t.Errorf("i=%d: Mode: got %o, want %o", i, got, want) t.Errorf("i=%d: Mode: got %o, want %o", i, got, want)
} }
if got, want := fi.Mode(), g.fm; got != want { if got, want := fi.Mode(), v.fm; got != want {
t.Errorf("i=%d: fi.Mode: got %o, want %o", i, got, want) t.Errorf("i=%d: fi.Mode: got %o, want %o", i, got, want)
} }
if got, want := h2.AccessTime, g.h.AccessTime; got != want { if got, want := h2.AccessTime, v.h.AccessTime; got != want {
t.Errorf("i=%d: AccessTime: got %v, want %v", i, got, want) t.Errorf("i=%d: AccessTime: got %v, want %v", i, got, want)
} }
if got, want := h2.ChangeTime, g.h.ChangeTime; got != want { if got, want := h2.ChangeTime, v.h.ChangeTime; got != want {
t.Errorf("i=%d: ChangeTime: got %v, want %v", i, got, want) t.Errorf("i=%d: ChangeTime: got %v, want %v", i, got, want)
} }
if got, want := h2.ModTime, g.h.ModTime; got != want { if got, want := h2.ModTime, v.h.ModTime; got != want {
t.Errorf("i=%d: ModTime: got %v, want %v", i, got, want) t.Errorf("i=%d: ModTime: got %v, want %v", i, got, want)
} }
if sysh, ok := fi.Sys().(*Header); !ok || sysh != g.h { if sysh, ok := fi.Sys().(*Header); !ok || sysh != v.h {
t.Errorf("i=%d: Sys didn't return original *Header", i) t.Errorf("i=%d: Sys didn't return original *Header", i)
} }
} }
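The loop above round-trips each vector's Header through os.FileInfo and back via FileInfoHeader. For readers unfamiliar with that API, a minimal usage sketch (the file name here is hypothetical, not part of the test data):

package main

import (
	"archive/tar"
	"fmt"
	"log"
	"os"
)

func main() {
	fi, err := os.Stat("file.txt") // hypothetical path, for illustration only
	if err != nil {
		log.Fatal(err)
	}
	// FileInfoHeader fills in Name, Mode, Size and ModTime from the FileInfo;
	// the second argument is only used as the link target for symlinks.
	hdr, err := tar.FileInfoHeader(fi, "")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(hdr.Name, hdr.Size, hdr.Mode)
}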


View file

@ -42,10 +42,6 @@ type Writer struct {
paxHdrBuff block // buffer to use in writeHeader when writing a PAX header paxHdrBuff block // buffer to use in writeHeader when writing a PAX header
} }
type formatter struct {
err error // Last error seen
}
// NewWriter creates a new Writer writing to w. // NewWriter creates a new Writer writing to w.
func NewWriter(w io.Writer) *Writer { return &Writer{w: w} } func NewWriter(w io.Writer) *Writer { return &Writer{w: w} }
@ -71,56 +67,6 @@ func (tw *Writer) Flush() error {
return tw.err return tw.err
} }
// Write s into b, terminating it with a NUL if there is room.
func (f *formatter) formatString(b []byte, s string) {
if len(s) > len(b) {
f.err = ErrFieldTooLong
return
}
ascii := toASCII(s)
copy(b, ascii)
if len(ascii) < len(b) {
b[len(ascii)] = 0
}
}
// Encode x as an octal ASCII string and write it into b with leading zeros.
func (f *formatter) formatOctal(b []byte, x int64) {
s := strconv.FormatInt(x, 8)
// leading zeros, but leave room for a NUL.
for len(s)+1 < len(b) {
s = "0" + s
}
f.formatString(b, s)
}
// fitsInBase256 reports whether x can be encoded into n bytes using base-256
// encoding. Unlike octal encoding, base-256 encoding does not require that the
// string ends with a NUL character. Thus, all n bytes are available for output.
//
// If operating in binary mode, this assumes strict GNU binary mode; which means
// that the first byte can only be either 0x80 or 0xff. Thus, the first byte is
// equivalent to the sign bit in two's complement form.
func fitsInBase256(n int, x int64) bool {
var binBits = uint(n-1) * 8
return n >= 9 || (x >= -1<<binBits && x < 1<<binBits)
}
// Write x into b, as binary (GNUtar/star extension).
func (f *formatter) formatNumeric(b []byte, x int64) {
if fitsInBase256(len(b), x) {
for i := len(b) - 1; i >= 0; i-- {
b[i] = byte(x)
x >>= 8
}
b[0] |= 0x80 // Highest bit indicates binary format
return
}
f.formatOctal(b, 0) // Last resort, just write zero
f.err = ErrFieldTooLong
}
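The removed helpers above encode a numeric field either as NUL-terminated octal or, when the value does not fit, as GNU base-256 binary with the high bit of the first byte set. A standalone sketch of the base-256 case (the field width is chosen here just for illustration):

package main

import "fmt"

// encodeBase256 mirrors the binary branch of formatNumeric above: the value is
// written big-endian into the field and the first byte's top bit marks the
// field as base-256 rather than octal.
func encodeBase256(b []byte, x int64) {
	for i := len(b) - 1; i >= 0; i-- {
		b[i] = byte(x)
		x >>= 8
	}
	b[0] |= 0x80 // highest bit indicates binary format
}

func main() {
	field := make([]byte, 8) // an 8-byte field holds only 7 octal digits plus NUL
	encodeBase256(field, 1<<33)
	fmt.Printf("% x\n", field) // 80 00 00 02 00 00 00 00
}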
var ( var (
minTime = time.Unix(0, 0) minTime = time.Unix(0, 0)
// There is room for 11 octal digits (33 bits) of mtime. // There is room for 11 octal digits (33 bits) of mtime.
@ -224,9 +170,41 @@ func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error {
formatNumeric(ustar.DevMajor(), hdr.Devmajor, paxNone) formatNumeric(ustar.DevMajor(), hdr.Devmajor, paxNone)
formatNumeric(ustar.DevMinor(), hdr.Devminor, paxNone) formatNumeric(ustar.DevMinor(), hdr.Devminor, paxNone)
// TODO(dsnet): The logic surrounding the prefix field is broken when trying
// to encode the header as GNU format. The challenge with the current logic
// is that we are unsure what format we are using at any given moment until
// we have processed *all* of the fields. The problem is that by the time
// all fields have been processed, some work has already been done to handle
// each field under the assumption that it is for one given format or
// another. In some situations, this causes the Writer to be confused and
// encode a prefix field when the format being used is GNU. Thus, producing
// an invalid tar file.
//
// As a short-term fix, we disable the logic to use the prefix field, which
// will force the badly generated GNU files to become encoded as being
// the PAX format.
//
// As an alternative fix, we could hard-code preferPax to be true. However,
// this is problematic for the following reasons:
// * The preferPax functionality is not tested at all.
// * This can result in headers that try to use both the GNU and PAX
// features at the same time, which is also wrong.
//
// The proper fix for this is to use a two-pass method:
// * The first pass simply determines what set of formats can possibly
// encode the given header.
// * The second pass actually encodes the header as that given format
// without worrying about violating the format.
//
// See the following:
// https://golang.org/issue/12594
// https://golang.org/issue/17630
// https://golang.org/issue/9683
const usePrefix = false
// try to use a ustar header when only the name is too long // try to use a ustar header when only the name is too long
_, paxPathUsed := paxHeaders[paxPath] _, paxPathUsed := paxHeaders[paxPath]
if !tw.preferPax && len(paxHeaders) == 1 && paxPathUsed { if usePrefix && !tw.preferPax && len(paxHeaders) == 1 && paxPathUsed {
prefix, suffix, ok := splitUSTARPath(hdr.Name) prefix, suffix, ok := splitUSTARPath(hdr.Name)
if ok { if ok {
// Since we can encode in USTAR format, disable PAX header. // Since we can encode in USTAR format, disable PAX header.
@ -317,7 +295,7 @@ func (tw *Writer) writePAXHeader(hdr *Header, paxHeaders map[string]string) erro
var buf bytes.Buffer var buf bytes.Buffer
// Keys are sorted before writing to body to allow deterministic output. // Keys are sorted before writing to body to allow deterministic output.
var keys []string keys := make([]string, 0, len(paxHeaders))
for k := range paxHeaders { for k := range paxHeaders {
keys = append(keys, k) keys = append(keys, k)
} }
@ -340,22 +318,6 @@ func (tw *Writer) writePAXHeader(hdr *Header, paxHeaders map[string]string) erro
return nil return nil
} }
// formatPAXRecord formats a single PAX record, prefixing it with the
// appropriate length.
func formatPAXRecord(k, v string) string {
const padding = 3 // Extra padding for ' ', '=', and '\n'
size := len(k) + len(v) + padding
size += len(strconv.Itoa(size))
record := fmt.Sprintf("%d %s=%s\n", size, k, v)
// Final adjustment if adding size field increased the record size.
if len(record) != size {
size = len(record)
record = fmt.Sprintf("%d %s=%s\n", size, k, v)
}
return record
}
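The removed formatPAXRecord above builds records of the form "<size> <key>=<value>\n", where <size> counts the whole record including its own digits, hence the second formatting pass. A standalone sketch reproducing the "19 path=/etc/hosts\n" vector that appears in the tests below:

package main

import (
	"fmt"
	"strconv"
)

// paxRecord mirrors the removed helper: the leading decimal length covers the
// entire record, including the digits of the length itself.
func paxRecord(k, v string) string {
	const padding = 3 // ' ', '=' and '\n'
	size := len(k) + len(v) + padding
	size += len(strconv.Itoa(size))
	record := fmt.Sprintf("%d %s=%s\n", size, k, v)
	if len(record) != size { // adding the size field may have added a digit
		size = len(record)
		record = fmt.Sprintf("%d %s=%s\n", size, k, v)
	}
	return record
}

func main() {
	fmt.Printf("%q\n", paxRecord("path", "/etc/hosts")) // "19 path=/etc/hosts\n"
}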
// Write writes to the current entry in the tar archive. // Write writes to the current entry in the tar archive.
// Write returns the error ErrWriteTooLong if more than // Write returns the error ErrWriteTooLong if more than
// hdr.Size bytes are written after WriteHeader. // hdr.Size bytes are written after WriteHeader.

View file

@ -9,7 +9,6 @@ import (
"fmt" "fmt"
"io" "io"
"io/ioutil" "io/ioutil"
"math"
"os" "os"
"reflect" "reflect"
"sort" "sort"
@ -19,176 +18,6 @@ import (
"time" "time"
) )
type writerTestEntry struct {
header *Header
contents string
}
type writerTest struct {
file string // filename of expected output
entries []*writerTestEntry
}
var writerTests = []*writerTest{
// The writer test file was produced with this command:
// tar (GNU tar) 1.26
// ln -s small.txt link.txt
// tar -b 1 --format=ustar -c -f writer.tar small.txt small2.txt link.txt
{
file: "testdata/writer.tar",
entries: []*writerTestEntry{
{
header: &Header{
Name: "small.txt",
Mode: 0640,
Uid: 73025,
Gid: 5000,
Size: 5,
ModTime: time.Unix(1246508266, 0),
Typeflag: '0',
Uname: "dsymonds",
Gname: "eng",
},
contents: "Kilts",
},
{
header: &Header{
Name: "small2.txt",
Mode: 0640,
Uid: 73025,
Gid: 5000,
Size: 11,
ModTime: time.Unix(1245217492, 0),
Typeflag: '0',
Uname: "dsymonds",
Gname: "eng",
},
contents: "Google.com\n",
},
{
header: &Header{
Name: "link.txt",
Mode: 0777,
Uid: 1000,
Gid: 1000,
Size: 0,
ModTime: time.Unix(1314603082, 0),
Typeflag: '2',
Linkname: "small.txt",
Uname: "strings",
Gname: "strings",
},
// no contents
},
},
},
// The truncated test file was produced using these commands:
// dd if=/dev/zero bs=1048576 count=16384 > /tmp/16gig.txt
// tar -b 1 -c -f- /tmp/16gig.txt | dd bs=512 count=8 > writer-big.tar
{
file: "testdata/writer-big.tar",
entries: []*writerTestEntry{
{
header: &Header{
Name: "tmp/16gig.txt",
Mode: 0640,
Uid: 73025,
Gid: 5000,
Size: 16 << 30,
ModTime: time.Unix(1254699560, 0),
Typeflag: '0',
Uname: "dsymonds",
Gname: "eng",
},
// fake contents
contents: strings.Repeat("\x00", 4<<10),
},
},
},
// The truncated test file was produced using these commands:
// dd if=/dev/zero bs=1048576 count=16384 > (longname/)*15 /16gig.txt
// tar -b 1 -c -f- (longname/)*15 /16gig.txt | dd bs=512 count=8 > writer-big-long.tar
{
file: "testdata/writer-big-long.tar",
entries: []*writerTestEntry{
{
header: &Header{
Name: strings.Repeat("longname/", 15) + "16gig.txt",
Mode: 0644,
Uid: 1000,
Gid: 1000,
Size: 16 << 30,
ModTime: time.Unix(1399583047, 0),
Typeflag: '0',
Uname: "guillaume",
Gname: "guillaume",
},
// fake contents
contents: strings.Repeat("\x00", 4<<10),
},
},
},
// This file was produced using gnu tar 1.17
// gnutar -b 4 --format=ustar (longname/)*15 + file.txt
{
file: "testdata/ustar.tar",
entries: []*writerTestEntry{
{
header: &Header{
Name: strings.Repeat("longname/", 15) + "file.txt",
Mode: 0644,
Uid: 0765,
Gid: 024,
Size: 06,
ModTime: time.Unix(1360135598, 0),
Typeflag: '0',
Uname: "shane",
Gname: "staff",
},
contents: "hello\n",
},
},
},
// This file was produced using gnu tar 1.26
// echo "Slartibartfast" > file.txt
// ln file.txt hard.txt
// tar -b 1 --format=ustar -c -f hardlink.tar file.txt hard.txt
{
file: "testdata/hardlink.tar",
entries: []*writerTestEntry{
{
header: &Header{
Name: "file.txt",
Mode: 0644,
Uid: 1000,
Gid: 100,
Size: 15,
ModTime: time.Unix(1425484303, 0),
Typeflag: '0',
Uname: "vbatts",
Gname: "users",
},
contents: "Slartibartfast\n",
},
{
header: &Header{
Name: "hard.txt",
Mode: 0644,
Uid: 1000,
Gid: 100,
Size: 0,
ModTime: time.Unix(1425484303, 0),
Typeflag: '1',
Linkname: "file.txt",
Uname: "vbatts",
Gname: "users",
},
// no contents
},
},
},
}
// Render byte array in a two-character hexadecimal string, spaced for easy visual inspection. // Render byte array in a two-character hexadecimal string, spaced for easy visual inspection.
func bytestr(offset int, b []byte) string { func bytestr(offset int, b []byte) string {
const rowLen = 32 const rowLen = 32
@ -228,9 +57,168 @@ func bytediff(a []byte, b []byte) string {
} }
func TestWriter(t *testing.T) { func TestWriter(t *testing.T) {
type entry struct {
header *Header
contents string
}
vectors := []struct {
file string // filename of expected output
entries []*entry
}{{
// The writer test file was produced with this command:
// tar (GNU tar) 1.26
// ln -s small.txt link.txt
// tar -b 1 --format=ustar -c -f writer.tar small.txt small2.txt link.txt
file: "testdata/writer.tar",
entries: []*entry{{
header: &Header{
Name: "small.txt",
Mode: 0640,
Uid: 73025,
Gid: 5000,
Size: 5,
ModTime: time.Unix(1246508266, 0),
Typeflag: '0',
Uname: "dsymonds",
Gname: "eng",
},
contents: "Kilts",
}, {
header: &Header{
Name: "small2.txt",
Mode: 0640,
Uid: 73025,
Gid: 5000,
Size: 11,
ModTime: time.Unix(1245217492, 0),
Typeflag: '0',
Uname: "dsymonds",
Gname: "eng",
},
contents: "Google.com\n",
}, {
header: &Header{
Name: "link.txt",
Mode: 0777,
Uid: 1000,
Gid: 1000,
Size: 0,
ModTime: time.Unix(1314603082, 0),
Typeflag: '2',
Linkname: "small.txt",
Uname: "strings",
Gname: "strings",
},
// no contents
}},
}, {
// The truncated test file was produced using these commands:
// dd if=/dev/zero bs=1048576 count=16384 > /tmp/16gig.txt
// tar -b 1 -c -f- /tmp/16gig.txt | dd bs=512 count=8 > writer-big.tar
file: "testdata/writer-big.tar",
entries: []*entry{{
header: &Header{
Name: "tmp/16gig.txt",
Mode: 0640,
Uid: 73025,
Gid: 5000,
Size: 16 << 30,
ModTime: time.Unix(1254699560, 0),
Typeflag: '0',
Uname: "dsymonds",
Gname: "eng",
},
// fake contents
contents: strings.Repeat("\x00", 4<<10),
}},
}, {
// This truncated file was produced using this library.
// It was verified to work with GNU tar 1.27.1 and BSD tar 3.1.2.
// dd if=/dev/zero bs=1G count=16 >> writer-big-long.tar
// gnutar -xvf writer-big-long.tar
// bsdtar -xvf writer-big-long.tar
//
// This file is in PAX format.
file: "testdata/writer-big-long.tar",
entries: []*entry{{
header: &Header{
Name: strings.Repeat("longname/", 15) + "16gig.txt",
Mode: 0644,
Uid: 1000,
Gid: 1000,
Size: 16 << 30,
ModTime: time.Unix(1399583047, 0),
Typeflag: '0',
Uname: "guillaume",
Gname: "guillaume",
},
// fake contents
contents: strings.Repeat("\x00", 4<<10),
}},
}, {
// TODO(dsnet): The Writer output should match the following file.
// To fix an issue (see https://golang.org/issue/12594), we disabled
// prefix support, which alters the generated output.
/*
// This file was produced using gnu tar 1.17
// gnutar -b 4 --format=ustar (longname/)*15 + file.txt
file: "testdata/ustar.tar"
*/
file: "testdata/ustar.issue12594.tar", // This is a valid tar file, but not expected
entries: []*entry{{
header: &Header{
Name: strings.Repeat("longname/", 15) + "file.txt",
Mode: 0644,
Uid: 0765,
Gid: 024,
Size: 06,
ModTime: time.Unix(1360135598, 0),
Typeflag: '0',
Uname: "shane",
Gname: "staff",
},
contents: "hello\n",
}},
}, {
// This file was produced using gnu tar 1.26
// echo "Slartibartfast" > file.txt
// ln file.txt hard.txt
// tar -b 1 --format=ustar -c -f hardlink.tar file.txt hard.txt
file: "testdata/hardlink.tar",
entries: []*entry{{
header: &Header{
Name: "file.txt",
Mode: 0644,
Uid: 1000,
Gid: 100,
Size: 15,
ModTime: time.Unix(1425484303, 0),
Typeflag: '0',
Uname: "vbatts",
Gname: "users",
},
contents: "Slartibartfast\n",
}, {
header: &Header{
Name: "hard.txt",
Mode: 0644,
Uid: 1000,
Gid: 100,
Size: 0,
ModTime: time.Unix(1425484303, 0),
Typeflag: '1',
Linkname: "file.txt",
Uname: "vbatts",
Gname: "users",
},
// no contents
}},
}}
testLoop: testLoop:
for i, test := range writerTests { for i, v := range vectors {
expected, err := ioutil.ReadFile(test.file) expected, err := ioutil.ReadFile(v.file)
if err != nil { if err != nil {
t.Errorf("test %d: Unexpected error: %v", i, err) t.Errorf("test %d: Unexpected error: %v", i, err)
continue continue
@ -239,7 +227,7 @@ testLoop:
buf := new(bytes.Buffer) buf := new(bytes.Buffer)
tw := NewWriter(iotest.TruncateWriter(buf, 4<<10)) // only catch the first 4 KB tw := NewWriter(iotest.TruncateWriter(buf, 4<<10)) // only catch the first 4 KB
big := false big := false
for j, entry := range test.entries { for j, entry := range v.entries {
big = big || entry.header.Size > 1<<10 big = big || entry.header.Size > 1<<10
if err := tw.WriteHeader(entry.header); err != nil { if err := tw.WriteHeader(entry.header); err != nil {
t.Errorf("test %d, entry %d: Failed writing header: %v", i, j, err) t.Errorf("test %d, entry %d: Failed writing header: %v", i, j, err)
@ -576,9 +564,9 @@ func TestWriteAfterClose(t *testing.T) {
} }
func TestSplitUSTARPath(t *testing.T) { func TestSplitUSTARPath(t *testing.T) {
var sr = strings.Repeat sr := strings.Repeat
var vectors = []struct { vectors := []struct {
input string // Input path input string // Input path
prefix string // Expected output prefix prefix string // Expected output prefix
suffix string // Expected output suffix suffix string // Expected output suffix
@ -609,114 +597,51 @@ func TestSplitUSTARPath(t *testing.T) {
} }
} }
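TestSplitUSTARPath above exercises the helper that tries to split an over-long path into the USTAR prefix (at most 155 bytes) and name (at most 100 bytes) fields. A hedged sketch of that rule, not the package's exact implementation (the field sizes come from the USTAR layout):

package main

import (
	"fmt"
	"strings"
)

// splitPath sketches the USTAR split: pick a '/' so that everything before it
// fits the 155-byte prefix field and everything after it fits the 100-byte
// name field.
func splitPath(name string) (prefix, suffix string, ok bool) {
	const prefixSize, nameSize = 155, 100
	length := len(name)
	if length <= nameSize {
		return "", "", false // already fits in the name field alone
	}
	if length > prefixSize+1 {
		length = prefixSize + 1 // a usable '/' must occur within this window
	}
	i := strings.LastIndex(name[:length], "/")
	if i <= 0 || len(name)-i-1 > nameSize {
		return "", "", false
	}
	return name[:i], name[i+1:], true
}

func main() {
	prefix, suffix, ok := splitPath(strings.Repeat("longname/", 15) + "file.txt")
	fmt.Println(ok, len(prefix), suffix) // true 134 file.txt
}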
func TestFormatPAXRecord(t *testing.T) { // TestIssue12594 tests that the Writer does not attempt to populate the prefix
var medName = strings.Repeat("CD", 50) // field when encoding a header in the GNU format. The prefix field is valid
var longName = strings.Repeat("AB", 100) // in USTAR and PAX, but not GNU.
func TestIssue12594(t *testing.T) {
var vectors = []struct { names := []string{
inputKey string "0/1/2/3/4/5/6/7/8/9/10/11/12/13/14/15/16/17/18/19/20/21/22/23/24/25/26/27/28/29/30/file.txt",
inputVal string "0/1/2/3/4/5/6/7/8/9/10/11/12/13/14/15/16/17/18/19/20/21/22/23/24/25/26/27/28/29/30/31/32/33/file.txt",
output string "0/1/2/3/4/5/6/7/8/9/10/11/12/13/14/15/16/17/18/19/20/21/22/23/24/25/26/27/28/29/30/31/32/333/file.txt",
}{ "0/1/2/3/4/5/6/7/8/9/10/11/12/13/14/15/16/17/18/19/20/21/22/23/24/25/26/27/28/29/30/31/32/33/34/35/36/37/38/39/40/file.txt",
{"k", "v", "6 k=v\n"}, "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000/file.txt",
{"path", "/etc/hosts", "19 path=/etc/hosts\n"}, "/home/support/.openoffice.org/3/user/uno_packages/cache/registry/com.sun.star.comp.deployment.executable.PackageRegistryBackend",
{"path", longName, "210 path=" + longName + "\n"},
{"path", medName, "110 path=" + medName + "\n"},
{"foo", "ba", "9 foo=ba\n"},
{"foo", "bar", "11 foo=bar\n"},
{"foo", "b=\nar=\n==\x00", "18 foo=b=\nar=\n==\x00\n"},
{"foo", "hello9 foo=ba\nworld", "27 foo=hello9 foo=ba\nworld\n"},
{"☺☻☹", "日a本b語ç", "27 ☺☻☹=日a本b語ç\n"},
{"\x00hello", "\x00world", "17 \x00hello=\x00world\n"},
} }
for _, v := range vectors { for i, name := range names {
output := formatPAXRecord(v.inputKey, v.inputVal) var b bytes.Buffer
if output != v.output {
t.Errorf("formatPAXRecord(%q, %q): got %q, want %q", tw := NewWriter(&b)
v.inputKey, v.inputVal, output, v.output) if err := tw.WriteHeader(&Header{
} Name: name,
} Uid: 1 << 25, // Prevent USTAR format
} }); err != nil {
t.Errorf("test %d, unexpected WriteHeader error: %v", i, err)
func TestFitsInBase256(t *testing.T) { }
var vectors = []struct { if err := tw.Close(); err != nil {
input int64 t.Errorf("test %d, unexpected Close error: %v", i, err)
width int }
ok bool
}{ // The prefix field should never appear in the GNU format.
{+1, 8, true}, var blk block
{0, 8, true}, copy(blk[:], b.Bytes())
{-1, 8, true}, prefix := string(blk.USTAR().Prefix())
{1 << 56, 8, false}, if i := strings.IndexByte(prefix, 0); i >= 0 {
{(1 << 56) - 1, 8, true}, prefix = prefix[:i] // Truncate at the NUL terminator
{-1 << 56, 8, true}, }
{(-1 << 56) - 1, 8, false}, if blk.GetFormat() == formatGNU && len(prefix) > 0 && strings.HasPrefix(name, prefix) {
{121654, 8, true}, t.Errorf("test %d, found prefix in GNU format: %s", i, prefix)
{-9849849, 8, true}, }
{math.MaxInt64, 9, true},
{0, 9, true}, tr := NewReader(&b)
{math.MinInt64, 9, true}, hdr, err := tr.Next()
{math.MaxInt64, 12, true}, if err != nil {
{0, 12, true}, t.Errorf("test %d, unexpected Next error: %v", i, err)
{math.MinInt64, 12, true}, }
} if hdr.Name != name {
t.Errorf("test %d, hdr.Name = %s, want %s", i, hdr.Name, name)
for _, v := range vectors {
ok := fitsInBase256(v.width, v.input)
if ok != v.ok {
t.Errorf("checkNumeric(%d, %d): got %v, want %v", v.input, v.width, ok, v.ok)
}
}
}
func TestFormatNumeric(t *testing.T) {
var vectors = []struct {
input int64
output string
ok bool
}{
// Test base-256 (binary) encoded values.
{-1, "\xff", true},
{-1, "\xff\xff", true},
{-1, "\xff\xff\xff", true},
{(1 << 0), "0", false},
{(1 << 8) - 1, "\x80\xff", true},
{(1 << 8), "0\x00", false},
{(1 << 16) - 1, "\x80\xff\xff", true},
{(1 << 16), "00\x00", false},
{-1 * (1 << 0), "\xff", true},
{-1*(1<<0) - 1, "0", false},
{-1 * (1 << 8), "\xff\x00", true},
{-1*(1<<8) - 1, "0\x00", false},
{-1 * (1 << 16), "\xff\x00\x00", true},
{-1*(1<<16) - 1, "00\x00", false},
{537795476381659745, "0000000\x00", false},
{537795476381659745, "\x80\x00\x00\x00\x07\x76\xa2\x22\xeb\x8a\x72\x61", true},
{-615126028225187231, "0000000\x00", false},
{-615126028225187231, "\xff\xff\xff\xff\xf7\x76\xa2\x22\xeb\x8a\x72\x61", true},
{math.MaxInt64, "0000000\x00", false},
{math.MaxInt64, "\x80\x00\x00\x00\x7f\xff\xff\xff\xff\xff\xff\xff", true},
{math.MinInt64, "0000000\x00", false},
{math.MinInt64, "\xff\xff\xff\xff\x80\x00\x00\x00\x00\x00\x00\x00", true},
{math.MaxInt64, "\x80\x7f\xff\xff\xff\xff\xff\xff\xff", true},
{math.MinInt64, "\xff\x80\x00\x00\x00\x00\x00\x00\x00", true},
}
for _, v := range vectors {
var f formatter
output := make([]byte, len(v.output))
f.formatNumeric(output, v.input)
ok := (f.err == nil)
if ok != v.ok {
if v.ok {
t.Errorf("formatNumeric(%d): got formatting failure, want success", v.input)
} else {
t.Errorf("formatNumeric(%d): got formatting success, want failure", v.input)
}
}
if string(output) != v.output {
t.Errorf("formatNumeric(%d): got %q, want %q", v.input, output, v.output)
} }
} }
} }

View file

@ -22,6 +22,10 @@ type Writer struct {
last *fileWriter last *fileWriter
closed bool closed bool
compressors map[uint16]Compressor compressors map[uint16]Compressor
// testHookCloseSizeOffset if non-nil is called with the size
// of offset of the central directory at Close.
testHookCloseSizeOffset func(size, offset uint64)
} }
type header struct { type header struct {
@ -98,6 +102,7 @@ func (w *Writer) Close() error {
b.uint32(h.CompressedSize) b.uint32(h.CompressedSize)
b.uint32(h.UncompressedSize) b.uint32(h.UncompressedSize)
} }
b.uint16(uint16(len(h.Name))) b.uint16(uint16(len(h.Name)))
b.uint16(uint16(len(h.Extra))) b.uint16(uint16(len(h.Extra)))
b.uint16(uint16(len(h.Comment))) b.uint16(uint16(len(h.Comment)))
@ -127,7 +132,11 @@ func (w *Writer) Close() error {
size := uint64(end - start) size := uint64(end - start)
offset := uint64(start) offset := uint64(start)
if records > uint16max || size > uint32max || offset > uint32max { if f := w.testHookCloseSizeOffset; f != nil {
f(size, offset)
}
if records >= uint16max || size >= uint32max || offset >= uint32max {
var buf [directory64EndLen + directory64LocLen]byte var buf [directory64EndLen + directory64LocLen]byte
b := writeBuf(buf[:]) b := writeBuf(buf[:])

View file

@ -8,8 +8,10 @@ package zip
import ( import (
"bytes" "bytes"
"errors"
"fmt" "fmt"
"hash" "hash"
"internal/race"
"internal/testenv" "internal/testenv"
"io" "io"
"io/ioutil" "io/ioutil"
@ -232,6 +234,7 @@ func TestZip64(t *testing.T) {
if testing.Short() { if testing.Short() {
t.Skip("slow test; skipping") t.Skip("slow test; skipping")
} }
t.Parallel()
const size = 1 << 32 // before the "END\n" part const size = 1 << 32 // before the "END\n" part
buf := testZip64(t, size) buf := testZip64(t, size)
testZip64DirectoryRecordLength(buf, t) testZip64DirectoryRecordLength(buf, t)
@ -241,6 +244,7 @@ func TestZip64EdgeCase(t *testing.T) {
if testing.Short() { if testing.Short() {
t.Skip("slow test; skipping") t.Skip("slow test; skipping")
} }
t.Parallel()
// Test a zip file with uncompressed size 0xFFFFFFFF. // Test a zip file with uncompressed size 0xFFFFFFFF.
// That's the magic marker for a 64-bit file, so even though // That's the magic marker for a 64-bit file, so even though
// it fits in a 32-bit field we must use the 64-bit field. // it fits in a 32-bit field we must use the 64-bit field.
@ -251,6 +255,256 @@ func TestZip64EdgeCase(t *testing.T) {
testZip64DirectoryRecordLength(buf, t) testZip64DirectoryRecordLength(buf, t)
} }
// Tests that we generate a zip64 file if the directory is at offset
// 0xFFFFFFFF, but not before.
func TestZip64DirectoryOffset(t *testing.T) {
if testing.Short() && race.Enabled {
t.Skip("skipping in short mode")
}
t.Parallel()
const filename = "huge.txt"
gen := func(wantOff uint64) func(*Writer) {
return func(w *Writer) {
w.testHookCloseSizeOffset = func(size, off uint64) {
if off != wantOff {
t.Errorf("central directory offset = %d (%x); want %d", off, off, wantOff)
}
}
f, err := w.CreateHeader(&FileHeader{
Name: filename,
Method: Store,
})
if err != nil {
t.Fatal(err)
}
f.(*fileWriter).crc32 = fakeHash32{}
size := wantOff - fileHeaderLen - uint64(len(filename)) - dataDescriptorLen
if _, err := io.CopyN(f, zeros{}, int64(size)); err != nil {
t.Fatal(err)
}
if err := w.Close(); err != nil {
t.Fatal(err)
}
}
}
t.Run("uint32max-2_NoZip64", func(t *testing.T) {
t.Parallel()
if generatesZip64(t, gen(0xfffffffe)) {
t.Error("unexpected zip64")
}
})
t.Run("uint32max-1_Zip64", func(t *testing.T) {
t.Parallel()
if !generatesZip64(t, gen(0xffffffff)) {
t.Error("expected zip64")
}
})
}
// At 16k records, we need to generate a zip64 file.
func TestZip64ManyRecords(t *testing.T) {
if testing.Short() && race.Enabled {
t.Skip("skipping in short mode")
}
t.Parallel()
gen := func(numRec int) func(*Writer) {
return func(w *Writer) {
for i := 0; i < numRec; i++ {
_, err := w.CreateHeader(&FileHeader{
Name: "a.txt",
Method: Store,
})
if err != nil {
t.Fatal(err)
}
}
if err := w.Close(); err != nil {
t.Fatal(err)
}
}
}
// 16k-1 records shouldn't make a zip64:
t.Run("uint16max-1_NoZip64", func(t *testing.T) {
t.Parallel()
if generatesZip64(t, gen(0xfffe)) {
t.Error("unexpected zip64")
}
})
// 16k records should make a zip64:
t.Run("uint16max_Zip64", func(t *testing.T) {
t.Parallel()
if !generatesZip64(t, gen(0xffff)) {
t.Error("expected zip64")
}
})
}
// suffixSaver is an io.Writer & io.ReaderAt that remembers the last 0
// to 'keep' bytes of data written to it. Call Suffix to get the
// suffix bytes.
type suffixSaver struct {
keep int
buf []byte
start int
size int64
}
func (ss *suffixSaver) Size() int64 { return ss.size }
var errDiscardedBytes = errors.New("ReadAt of discarded bytes")
func (ss *suffixSaver) ReadAt(p []byte, off int64) (n int, err error) {
back := ss.size - off
if back > int64(ss.keep) {
return 0, errDiscardedBytes
}
suf := ss.Suffix()
n = copy(p, suf[len(suf)-int(back):])
if n != len(p) {
err = io.EOF
}
return
}
func (ss *suffixSaver) Suffix() []byte {
if len(ss.buf) < ss.keep {
return ss.buf
}
buf := make([]byte, ss.keep)
n := copy(buf, ss.buf[ss.start:])
copy(buf[n:], ss.buf[:])
return buf
}
func (ss *suffixSaver) Write(p []byte) (n int, err error) {
n = len(p)
ss.size += int64(len(p))
if len(ss.buf) < ss.keep {
space := ss.keep - len(ss.buf)
add := len(p)
if add > space {
add = space
}
ss.buf = append(ss.buf, p[:add]...)
p = p[add:]
}
for len(p) > 0 {
n := copy(ss.buf[ss.start:], p)
p = p[n:]
ss.start += n
if ss.start == ss.keep {
ss.start = 0
}
}
return
}
// generatesZip64 reports whether f wrote a zip64 file.
// f is also responsible for closing w.
func generatesZip64(t *testing.T, f func(w *Writer)) bool {
ss := &suffixSaver{keep: 10 << 20}
w := NewWriter(ss)
f(w)
return suffixIsZip64(t, ss)
}
type sizedReaderAt interface {
io.ReaderAt
Size() int64
}
func suffixIsZip64(t *testing.T, zip sizedReaderAt) bool {
d := make([]byte, 1024)
if _, err := zip.ReadAt(d, zip.Size()-int64(len(d))); err != nil {
t.Fatalf("ReadAt: %v", err)
}
sigOff := findSignatureInBlock(d)
if sigOff == -1 {
t.Errorf("failed to find signature in block")
return false
}
dirOff, err := findDirectory64End(zip, zip.Size()-int64(len(d))+int64(sigOff))
if err != nil {
t.Fatalf("findDirectory64End: %v", err)
}
if dirOff == -1 {
return false
}
d = make([]byte, directory64EndLen)
if _, err := zip.ReadAt(d, dirOff); err != nil {
t.Fatalf("ReadAt(off=%d): %v", dirOff, err)
}
b := readBuf(d)
if sig := b.uint32(); sig != directory64EndSignature {
return false
}
size := b.uint64()
if size != directory64EndLen-12 {
t.Errorf("expected length of %d, got %d", directory64EndLen-12, size)
}
return true
}
// Zip64 is required if the total size of the records is uint32max.
func TestZip64LargeDirectory(t *testing.T) {
if testing.Short() {
t.Skip("skipping in short mode")
}
t.Parallel()
// gen returns a func that writes a zip with a wantLen bytes
// of central directory.
gen := func(wantLen int64) func(*Writer) {
return func(w *Writer) {
w.testHookCloseSizeOffset = func(size, off uint64) {
if size != uint64(wantLen) {
t.Errorf("Close central directory size = %d; want %d", size, wantLen)
}
}
uint16string := strings.Repeat(".", uint16max)
remain := wantLen
for remain > 0 {
commentLen := int(uint16max) - directoryHeaderLen - 1
thisRecLen := directoryHeaderLen + int(uint16max) + commentLen
if int64(thisRecLen) > remain {
remove := thisRecLen - int(remain)
commentLen -= remove
thisRecLen -= remove
}
remain -= int64(thisRecLen)
f, err := w.CreateHeader(&FileHeader{
Name: uint16string,
Comment: uint16string[:commentLen],
})
if err != nil {
t.Fatalf("CreateHeader: %v", err)
}
f.(*fileWriter).crc32 = fakeHash32{}
}
if err := w.Close(); err != nil {
t.Fatalf("Close: %v", err)
}
}
}
t.Run("uint32max-1_NoZip64", func(t *testing.T) {
t.Parallel()
if generatesZip64(t, gen(uint32max-1)) {
t.Error("unexpected zip64")
}
})
t.Run("uint32max_HasZip64", func(t *testing.T) {
t.Parallel()
if !generatesZip64(t, gen(uint32max)) {
t.Error("expected zip64")
}
})
}
func testZip64(t testing.TB, size int64) *rleBuffer { func testZip64(t testing.TB, size int64) *rleBuffer {
const chunkSize = 1024 const chunkSize = 1024
chunks := int(size / chunkSize) chunks := int(size / chunkSize)
@ -339,30 +593,8 @@ func testZip64(t testing.TB, size int64) *rleBuffer {
// Issue 9857 // Issue 9857
func testZip64DirectoryRecordLength(buf *rleBuffer, t *testing.T) { func testZip64DirectoryRecordLength(buf *rleBuffer, t *testing.T) {
d := make([]byte, 1024) if !suffixIsZip64(t, buf) {
if _, err := buf.ReadAt(d, buf.Size()-int64(len(d))); err != nil { t.Fatal("not a zip64")
t.Fatal("read:", err)
}
sigOff := findSignatureInBlock(d)
dirOff, err := findDirectory64End(buf, buf.Size()-int64(len(d))+int64(sigOff))
if err != nil {
t.Fatal("findDirectory64End:", err)
}
d = make([]byte, directory64EndLen)
if _, err := buf.ReadAt(d, dirOff); err != nil {
t.Fatal("read:", err)
}
b := readBuf(d)
if sig := b.uint32(); sig != directory64EndSignature {
t.Fatalf("Expected directory64EndSignature (%d), got %d", directory64EndSignature, sig)
}
size := b.uint64()
if size != directory64EndLen-12 {
t.Fatalf("Expected length of %d, got %d", directory64EndLen-12, size)
} }
} }
@ -448,3 +680,47 @@ func BenchmarkZip64Test(b *testing.B) {
testZip64(b, 1<<26) testZip64(b, 1<<26)
} }
} }
func TestSuffixSaver(t *testing.T) {
const keep = 10
ss := &suffixSaver{keep: keep}
ss.Write([]byte("abc"))
if got := string(ss.Suffix()); got != "abc" {
t.Errorf("got = %q; want abc", got)
}
ss.Write([]byte("defghijklmno"))
if got := string(ss.Suffix()); got != "fghijklmno" {
t.Errorf("got = %q; want fghijklmno", got)
}
if got, want := ss.Size(), int64(len("abc")+len("defghijklmno")); got != want {
t.Errorf("Size = %d; want %d", got, want)
}
buf := make([]byte, ss.Size())
for off := int64(0); off < ss.Size(); off++ {
for size := 1; size <= int(ss.Size()-off); size++ {
readBuf := buf[:size]
n, err := ss.ReadAt(readBuf, off)
if off < ss.Size()-keep {
if err != errDiscardedBytes {
t.Errorf("off %d, size %d = %v, %v (%q); want errDiscardedBytes", off, size, n, err, readBuf[:n])
}
continue
}
want := "abcdefghijklmno"[off : off+int64(size)]
got := string(readBuf[:n])
if err != nil || got != want {
t.Errorf("off %d, size %d = %v, %v (%q); want %q", off, size, n, err, got, want)
}
}
}
}
type zeros struct{}
func (zeros) Read(p []byte) (int, error) {
for i := range p {
p[i] = 0
}
return len(p), nil
}

View file

@ -206,10 +206,18 @@ func (b *Reader) Read(p []byte) (n int, err error) {
} }
return n, b.readErr() return n, b.readErr()
} }
b.fill() // buffer is empty // One read.
if b.r == b.w { // Do not use b.fill, which will loop.
b.r = 0
b.w = 0
n, b.err = b.rd.Read(b.buf)
if n < 0 {
panic(errNegativeRead)
}
if n == 0 {
return 0, b.readErr() return 0, b.readErr()
} }
b.w += n
} }
// copy as much as we can // copy as much as we can
@ -549,11 +557,6 @@ func (b *Writer) Reset(w io.Writer) {
// Flush writes any buffered data to the underlying io.Writer. // Flush writes any buffered data to the underlying io.Writer.
func (b *Writer) Flush() error { func (b *Writer) Flush() error {
err := b.flush()
return err
}
func (b *Writer) flush() error {
if b.err != nil { if b.err != nil {
return b.err return b.err
} }
@ -596,7 +599,7 @@ func (b *Writer) Write(p []byte) (nn int, err error) {
} else { } else {
n = copy(b.buf[b.n:], p) n = copy(b.buf[b.n:], p)
b.n += n b.n += n
b.flush() b.Flush()
} }
nn += n nn += n
p = p[n:] p = p[n:]
@ -615,7 +618,7 @@ func (b *Writer) WriteByte(c byte) error {
if b.err != nil { if b.err != nil {
return b.err return b.err
} }
if b.Available() <= 0 && b.flush() != nil { if b.Available() <= 0 && b.Flush() != nil {
return b.err return b.err
} }
b.buf[b.n] = c b.buf[b.n] = c
@ -638,7 +641,7 @@ func (b *Writer) WriteRune(r rune) (size int, err error) {
} }
n := b.Available() n := b.Available()
if n < utf8.UTFMax { if n < utf8.UTFMax {
if b.flush(); b.err != nil { if b.Flush(); b.err != nil {
return 0, b.err return 0, b.err
} }
n = b.Available() n = b.Available()
@ -663,7 +666,7 @@ func (b *Writer) WriteString(s string) (int, error) {
b.n += n b.n += n
nn += n nn += n
s = s[n:] s = s[n:]
b.flush() b.Flush()
} }
if b.err != nil { if b.err != nil {
return nn, b.err return nn, b.err
@ -684,7 +687,7 @@ func (b *Writer) ReadFrom(r io.Reader) (n int64, err error) {
var m int var m int
for { for {
if b.Available() == 0 { if b.Available() == 0 {
if err1 := b.flush(); err1 != nil { if err1 := b.Flush(); err1 != nil {
return n, err1 return n, err1
} }
} }
@ -708,7 +711,7 @@ func (b *Writer) ReadFrom(r io.Reader) (n int64, err error) {
if err == io.EOF { if err == io.EOF {
// If we filled the buffer exactly, flush preemptively. // If we filled the buffer exactly, flush preemptively.
if b.Available() == 0 { if b.Available() == 0 {
err = b.flush() err = b.Flush()
} else { } else {
err = nil err = nil
} }

View file

@ -1236,6 +1236,27 @@ func TestWriterReadFromErrNoProgress(t *testing.T) {
} }
} }
func TestReadZero(t *testing.T) {
for _, size := range []int{100, 2} {
t.Run(fmt.Sprintf("bufsize=%d", size), func(t *testing.T) {
r := io.MultiReader(strings.NewReader("abc"), &emptyThenNonEmptyReader{r: strings.NewReader("def"), n: 1})
br := NewReaderSize(r, size)
want := func(s string, wantErr error) {
p := make([]byte, 50)
n, err := br.Read(p)
if err != wantErr || n != len(s) || string(p[:n]) != s {
t.Fatalf("read(%d) = %q, %v, want %q, %v", len(p), string(p[:n]), err, s, wantErr)
}
t.Logf("read(%d) = %q, %v", len(p), string(p[:n]), err)
}
want("abc", nil)
want("", nil)
want("def", nil)
want("", io.EOF)
})
}
}
func TestReaderReset(t *testing.T) { func TestReaderReset(t *testing.T) {
r := NewReader(strings.NewReader("foo foo")) r := NewReader(strings.NewReader("foo foo"))
buf := make([]byte, 3) buf := make([]byte, 3)

View file

@ -199,7 +199,6 @@ func (s *Scanner) Scan() bool {
s.buf = newBuf s.buf = newBuf
s.end -= s.start s.end -= s.start
s.start = 0 s.start = 0
continue
} }
// Finally we can read some input. Make sure we don't get stuck with // Finally we can read some input. Make sure we don't get stuck with
// a misbehaving Reader. Officially we don't need to do this, but let's // a misbehaving Reader. Officially we don't need to do this, but let's

View file

@ -173,8 +173,8 @@ func cap(v Type) int
// specify a different capacity; it must be no smaller than the // specify a different capacity; it must be no smaller than the
// length, so make([]int, 0, 10) allocates a slice of length 0 and // length, so make([]int, 0, 10) allocates a slice of length 0 and
// capacity 10. // capacity 10.
// Map: An initial allocation is made according to the size but the // Map: An empty map is allocated with enough space to hold the
// resulting map has length 0. The size may be omitted, in which case // specified number of elements. The size may be omitted, in which case
// a small starting size is allocated. // a small starting size is allocated.
// Channel: The channel's buffer is initialized with the specified // Channel: The channel's buffer is initialized with the specified
// buffer capacity. If zero, or the size is omitted, the channel is // buffer capacity. If zero, or the size is omitted, the channel is

View file

@ -15,22 +15,25 @@ import (
// A Buffer is a variable-sized buffer of bytes with Read and Write methods. // A Buffer is a variable-sized buffer of bytes with Read and Write methods.
// The zero value for Buffer is an empty buffer ready to use. // The zero value for Buffer is an empty buffer ready to use.
type Buffer struct { type Buffer struct {
buf []byte // contents are the bytes buf[off : len(buf)] buf []byte // contents are the bytes buf[off : len(buf)]
off int // read at &buf[off], write at &buf[len(buf)] off int // read at &buf[off], write at &buf[len(buf)]
runeBytes [utf8.UTFMax]byte // avoid allocation of slice on each call to WriteRune bootstrap [64]byte // memory to hold first slice; helps small buffers avoid allocation.
bootstrap [64]byte // memory to hold first slice; helps small buffers avoid allocation. lastRead readOp // last read operation, so that Unread* can work correctly.
lastRead readOp // last read operation, so that Unread* can work correctly.
} }
// The readOp constants describe the last action performed on // The readOp constants describe the last action performed on
// the buffer, so that UnreadRune and UnreadByte can // the buffer, so that UnreadRune and UnreadByte can check for
// check for invalid usage. // invalid usage. opReadRuneX constants are chosen such that
// converted to int they correspond to the rune size that was read.
type readOp int type readOp int
const ( const (
opInvalid readOp = iota // Non-read operation. opRead readOp = -1 // Any other read operation.
opReadRune // Read rune. opInvalid = 0 // Non-read operation.
opRead // Any other read operation. opReadRune1 = 1 // Read rune of size 1.
opReadRune2 = 2 // Read rune of size 2.
opReadRune3 = 3 // Read rune of size 3.
opReadRune4 = 4 // Read rune of size 4.
) )
// ErrTooLarge is passed to panic if memory cannot be allocated to store data in a buffer. // ErrTooLarge is passed to panic if memory cannot be allocated to store data in a buffer.
@ -246,8 +249,10 @@ func (b *Buffer) WriteRune(r rune) (n int, err error) {
b.WriteByte(byte(r)) b.WriteByte(byte(r))
return 1, nil return 1, nil
} }
n = utf8.EncodeRune(b.runeBytes[0:], r) b.lastRead = opInvalid
b.Write(b.runeBytes[0:n]) m := b.grow(utf8.UTFMax)
n = utf8.EncodeRune(b.buf[m:m+utf8.UTFMax], r)
b.buf = b.buf[:m+n]
return n, nil return n, nil
} }
@ -318,14 +323,15 @@ func (b *Buffer) ReadRune() (r rune, size int, err error) {
b.Truncate(0) b.Truncate(0)
return 0, 0, io.EOF return 0, 0, io.EOF
} }
b.lastRead = opReadRune
c := b.buf[b.off] c := b.buf[b.off]
if c < utf8.RuneSelf { if c < utf8.RuneSelf {
b.off++ b.off++
b.lastRead = opReadRune1
return rune(c), 1, nil return rune(c), 1, nil
} }
r, n := utf8.DecodeRune(b.buf[b.off:]) r, n := utf8.DecodeRune(b.buf[b.off:])
b.off += n b.off += n
b.lastRead = readOp(n)
return r, n, nil return r, n, nil
} }
@ -335,14 +341,13 @@ func (b *Buffer) ReadRune() (r rune, size int, err error) {
// it is stricter than UnreadByte, which will unread the last byte // it is stricter than UnreadByte, which will unread the last byte
// from any read operation.) // from any read operation.)
func (b *Buffer) UnreadRune() error { func (b *Buffer) UnreadRune() error {
if b.lastRead != opReadRune { if b.lastRead <= opInvalid {
return errors.New("bytes.Buffer: UnreadRune: previous operation was not ReadRune") return errors.New("bytes.Buffer: UnreadRune: previous operation was not ReadRune")
} }
b.lastRead = opInvalid if b.off >= int(b.lastRead) {
if b.off > 0 { b.off -= int(b.lastRead)
_, n := utf8.DecodeLastRune(b.buf[0:b.off])
b.off -= n
} }
b.lastRead = opInvalid
return nil return nil
} }
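With the opReadRuneX constants above, UnreadRune no longer re-decodes the buffer; it simply steps back by the recorded rune size. A small usage sketch of the (unchanged) public behaviour:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	var b bytes.Buffer
	b.WriteString("é!") // 'é' encodes to 2 bytes of UTF-8

	r, size, _ := b.ReadRune()
	fmt.Println(string(r), size) // é 2

	// UnreadRune rewinds by exactly `size` bytes, which the buffer now
	// remembers in lastRead instead of decoding backwards.
	if err := b.UnreadRune(); err != nil {
		panic(err)
	}
	r, _, _ = b.ReadRune()
	fmt.Println(string(r)) // é
}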
@ -350,7 +355,7 @@ func (b *Buffer) UnreadRune() error {
// read operation. If write has happened since the last read, UnreadByte // read operation. If write has happened since the last read, UnreadByte
// returns an error. // returns an error.
func (b *Buffer) UnreadByte() error { func (b *Buffer) UnreadByte() error {
if b.lastRead != opReadRune && b.lastRead != opRead { if b.lastRead == opInvalid {
return errors.New("bytes.Buffer: UnreadByte: previous operation was not a read") return errors.New("bytes.Buffer: UnreadByte: previous operation was not a read")
} }
b.lastRead = opInvalid b.lastRead = opInvalid

View file

@ -514,6 +514,19 @@ func TestBufferGrowth(t *testing.T) {
} }
} }
func BenchmarkWriteRune(b *testing.B) {
const n = 4 << 10
const r = '☺'
b.SetBytes(int64(n * utf8.RuneLen(r)))
buf := NewBuffer(make([]byte, n*utf8.UTFMax))
for i := 0; i < b.N; i++ {
buf.Reset()
for i := 0; i < n; i++ {
buf.WriteRune(r)
}
}
}
// From Issue 5154. // From Issue 5154.
func BenchmarkBufferNotEmptyWriteRead(b *testing.B) { func BenchmarkBufferNotEmptyWriteRead(b *testing.B) {
buf := make([]byte, 1024) buf := make([]byte, 1024)

View file

@ -93,37 +93,6 @@ func ContainsRune(b []byte, r rune) bool {
return IndexRune(b, r) >= 0 return IndexRune(b, r) >= 0
} }
// Index returns the index of the first instance of sep in s, or -1 if sep is not present in s.
func Index(s, sep []byte) int {
n := len(sep)
if n == 0 {
return 0
}
if n > len(s) {
return -1
}
c := sep[0]
if n == 1 {
return IndexByte(s, c)
}
i := 0
t := s[:len(s)-n+1]
for i < len(t) {
if t[i] != c {
o := IndexByte(t[i:], c)
if o < 0 {
break
}
i += o
}
if Equal(s[i:i+n], sep) {
return i
}
i++
}
return -1
}
func indexBytePortable(s []byte, c byte) int { func indexBytePortable(s []byte, c byte) int {
for i, b := range s { for i, b := range s {
if b == c { if b == c {
@ -161,15 +130,28 @@ func LastIndexByte(s []byte, c byte) int {
// IndexRune interprets s as a sequence of UTF-8-encoded Unicode code points. // IndexRune interprets s as a sequence of UTF-8-encoded Unicode code points.
// It returns the byte index of the first occurrence in s of the given rune. // It returns the byte index of the first occurrence in s of the given rune.
// It returns -1 if rune is not present in s. // It returns -1 if rune is not present in s.
// If r is utf8.RuneError, it returns the first instance of any
// invalid UTF-8 byte sequence.
func IndexRune(s []byte, r rune) int { func IndexRune(s []byte, r rune) int {
for i := 0; i < len(s); { switch {
r1, size := utf8.DecodeRune(s[i:]) case 0 <= r && r < utf8.RuneSelf:
if r == r1 { return IndexByte(s, byte(r))
return i case r == utf8.RuneError:
for i := 0; i < len(s); {
r1, n := utf8.DecodeRune(s[i:])
if r1 == utf8.RuneError {
return i
}
i += n
} }
i += size return -1
case !utf8.ValidRune(r):
return -1
default:
var b [utf8.UTFMax]byte
n := utf8.EncodeRune(b[:], r)
return Index(s, b[:n])
} }
return -1
} }
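The rewritten IndexRune above fast-paths ASCII runes via IndexByte, treats utf8.RuneError as "first invalid UTF-8 sequence", and rejects invalid rune values outright. A short usage sketch consistent with the test vectors further down:

package main

import (
	"bytes"
	"fmt"
	"unicode/utf8"
)

func main() {
	fmt.Println(bytes.IndexRune([]byte("foo☺bar"), '☺')) // 3

	// utf8.RuneError reports the first invalid UTF-8 byte sequence.
	fmt.Println(bytes.IndexRune([]byte("abc\xffdef"), utf8.RuneError)) // 3

	// Invalid rune values (surrogates, values above MaxRune) never match.
	fmt.Println(bytes.IndexRune([]byte("abc"), 0xD800)) // -1
}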
// IndexAny interprets s as a sequence of UTF-8-encoded Unicode code points. // IndexAny interprets s as a sequence of UTF-8-encoded Unicode code points.
@ -178,10 +160,19 @@ func IndexRune(s []byte, r rune) int {
// point in common. // point in common.
func IndexAny(s []byte, chars string) int { func IndexAny(s []byte, chars string) int {
if len(chars) > 0 { if len(chars) > 0 {
var r rune if len(s) > 8 {
if as, isASCII := makeASCIISet(chars); isASCII {
for i, c := range s {
if as.contains(c) {
return i
}
}
return -1
}
}
var width int var width int
for i := 0; i < len(s); i += width { for i := 0; i < len(s); i += width {
r = rune(s[i]) r := rune(s[i])
if r < utf8.RuneSelf { if r < utf8.RuneSelf {
width = 1 width = 1
} else { } else {
@ -203,11 +194,21 @@ func IndexAny(s []byte, chars string) int {
// there is no code point in common. // there is no code point in common.
func LastIndexAny(s []byte, chars string) int { func LastIndexAny(s []byte, chars string) int {
if len(chars) > 0 { if len(chars) > 0 {
if len(s) > 8 {
if as, isASCII := makeASCIISet(chars); isASCII {
for i := len(s) - 1; i >= 0; i-- {
if as.contains(s[i]) {
return i
}
}
return -1
}
}
for i := len(s); i > 0; { for i := len(s); i > 0; {
r, size := utf8.DecodeLastRune(s[0:i]) r, size := utf8.DecodeLastRune(s[:i])
i -= size i -= size
for _, ch := range chars { for _, c := range chars {
if r == ch { if r == c {
return i return i
} }
} }
@ -398,7 +399,20 @@ func Map(mapping func(r rune) rune, s []byte) []byte {
} }
// Repeat returns a new byte slice consisting of count copies of b. // Repeat returns a new byte slice consisting of count copies of b.
//
// It panics if count is negative or if
// the result of (len(b) * count) overflows.
func Repeat(b []byte, count int) []byte { func Repeat(b []byte, count int) []byte {
// Since we cannot return an error on overflow,
// we should panic if the repeat will generate
// an overflow.
// See Issue golang.org/issue/16237.
if count < 0 {
panic("bytes: negative Repeat count")
} else if count > 0 && len(b)*count/count != len(b) {
panic("bytes: Repeat count causes overflow")
}
nb := make([]byte, len(b)*count) nb := make([]byte, len(b)*count)
bp := copy(nb, b) bp := copy(nb, b)
for bp < len(nb) { for bp < len(nb) {
@ -419,20 +433,20 @@ func ToTitle(s []byte) []byte { return Map(unicode.ToTitle, s) }
// ToUpperSpecial returns a copy of the byte slice s with all Unicode letters mapped to their // ToUpperSpecial returns a copy of the byte slice s with all Unicode letters mapped to their
// upper case, giving priority to the special casing rules. // upper case, giving priority to the special casing rules.
func ToUpperSpecial(_case unicode.SpecialCase, s []byte) []byte { func ToUpperSpecial(c unicode.SpecialCase, s []byte) []byte {
return Map(func(r rune) rune { return _case.ToUpper(r) }, s) return Map(func(r rune) rune { return c.ToUpper(r) }, s)
} }
// ToLowerSpecial returns a copy of the byte slice s with all Unicode letters mapped to their // ToLowerSpecial returns a copy of the byte slice s with all Unicode letters mapped to their
// lower case, giving priority to the special casing rules. // lower case, giving priority to the special casing rules.
func ToLowerSpecial(_case unicode.SpecialCase, s []byte) []byte { func ToLowerSpecial(c unicode.SpecialCase, s []byte) []byte {
return Map(func(r rune) rune { return _case.ToLower(r) }, s) return Map(func(r rune) rune { return c.ToLower(r) }, s)
} }
// ToTitleSpecial returns a copy of the byte slice s with all Unicode letters mapped to their // ToTitleSpecial returns a copy of the byte slice s with all Unicode letters mapped to their
// title case, giving priority to the special casing rules. // title case, giving priority to the special casing rules.
func ToTitleSpecial(_case unicode.SpecialCase, s []byte) []byte { func ToTitleSpecial(c unicode.SpecialCase, s []byte) []byte {
return Map(func(r rune) rune { return _case.ToTitle(r) }, s) return Map(func(r rune) rune { return c.ToTitle(r) }, s)
} }
// isSeparator reports whether the rune could mark a word boundary. // isSeparator reports whether the rune could mark a word boundary.
@ -578,7 +592,43 @@ func lastIndexFunc(s []byte, f func(r rune) bool, truth bool) int {
return -1 return -1
} }
// asciiSet is a 32-byte value, where each bit represents the presence of a
// given ASCII character in the set. The 128-bits of the lower 16 bytes,
// starting with the least-significant bit of the lowest word to the
// most-significant bit of the highest word, map to the full range of all
// 128 ASCII characters. The 128-bits of the upper 16 bytes will be zeroed,
// ensuring that any non-ASCII character will be reported as not in the set.
type asciiSet [8]uint32
// makeASCIISet creates a set of ASCII characters and reports whether all
// characters in chars are ASCII.
func makeASCIISet(chars string) (as asciiSet, ok bool) {
for i := 0; i < len(chars); i++ {
c := chars[i]
if c >= utf8.RuneSelf {
return as, false
}
as[c>>5] |= 1 << uint(c&31)
}
return as, true
}
// contains reports whether c is inside the set.
func (as *asciiSet) contains(c byte) bool {
return (as[c>>5] & (1 << uint(c&31))) != 0
}
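The asciiSet above packs a 128-bit membership table into eight uint32 words: character c lands in word c>>5 at bit c&31. A tiny standalone copy to make the indexing arithmetic concrete:

package main

import "fmt"

// set mirrors asciiSet above: eight uint32 words, one bit per ASCII character.
type set [8]uint32

func (s *set) add(c byte)           { s[c>>5] |= 1 << uint(c&31) }
func (s *set) contains(c byte) bool { return s[c>>5]&(1<<uint(c&31)) != 0 }

func main() {
	var s set
	s.add('b') // 'b' = 98: word 98>>5 = 3, bit 98&31 = 2
	fmt.Println(s.contains('b'), s.contains('a')) // true false
	fmt.Printf("%#x\n", s[3])                     // 0x4
}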
func makeCutsetFunc(cutset string) func(r rune) bool { func makeCutsetFunc(cutset string) func(r rune) bool {
if len(cutset) == 1 && cutset[0] < utf8.RuneSelf {
return func(r rune) bool {
return r == rune(cutset[0])
}
}
if as, isASCII := makeASCIISet(cutset); isASCII {
return func(r rune) bool {
return r < utf8.RuneSelf && as.contains(byte(r))
}
}
return func(r rune) bool { return func(r rune) bool {
for _, c := range cutset { for _, c := range cutset {
if c == r { if c == r {

View file

@ -0,0 +1,117 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
package bytes
//go:noescape
// indexShortStr returns the index of the first instance of c in s, or -1 if c is not present in s.
// indexShortStr requires 2 <= len(c) <= shortStringLen
func indexShortStr(s, c []byte) int // ../runtime/asm_$GOARCH.s
func supportAVX2() bool // ../runtime/asm_$GOARCH.s
var shortStringLen int
func init() {
if supportAVX2() {
shortStringLen = 63
} else {
shortStringLen = 31
}
}
// Index returns the index of the first instance of sep in s, or -1 if sep is not present in s.
func Index(s, sep []byte) int {
n := len(sep)
switch {
case n == 0:
return 0
case n == 1:
return IndexByte(s, sep[0])
case n == len(s):
if Equal(sep, s) {
return 0
}
return -1
case n > len(s):
return -1
case n <= shortStringLen:
// Use brute force when s and sep both are small
if len(s) <= 64 {
return indexShortStr(s, sep)
}
c := sep[0]
i := 0
t := s[:len(s)-n+1]
fails := 0
for i < len(t) {
if t[i] != c {
// IndexByte skips 16/32 bytes per iteration,
// so it's faster than indexShortStr.
o := IndexByte(t[i:], c)
if o < 0 {
return -1
}
i += o
}
if Equal(s[i:i+n], sep) {
return i
}
fails++
i++
// Switch to indexShortStr when IndexByte produces too many false positives.
// Too many means more than 1 error per 8 characters.
// Allow some errors in the beginning.
if fails > (i+16)/8 {
r := indexShortStr(s[i:], sep)
if r >= 0 {
return r + i
}
return -1
}
}
return -1
}
// Rabin-Karp search
hashsep, pow := hashStr(sep)
var h uint32
for i := 0; i < n; i++ {
h = h*primeRK + uint32(s[i])
}
if h == hashsep && Equal(s[:n], sep) {
return 0
}
for i := n; i < len(s); {
h *= primeRK
h += uint32(s[i])
h -= pow * uint32(s[i-n])
i++
if h == hashsep && Equal(s[i-n:i], sep) {
return i - n
}
}
return -1
}
// primeRK is the prime base used in Rabin-Karp algorithm.
const primeRK = 16777619
// hashStr returns the hash and the appropriate multiplicative
// factor for use in Rabin-Karp algorithm.
func hashStr(sep []byte) (uint32, uint32) {
hash := uint32(0)
for i := 0; i < len(sep); i++ {
hash = hash*primeRK + uint32(sep[i])
}
var pow, sq uint32 = 1, primeRK
for i := len(sep); i > 0; i >>= 1 {
if i&1 != 0 {
pow *= sq
}
sq *= sq
}
return hash, pow
}
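hashStr above supplies the two ingredients of the Rabin-Karp fallback: the hash of sep and pow = primeRK**len(sep), which lets the main loop slide the window with one multiply, one add and one subtract per byte. A minimal sketch of that rolling-hash invariant (the sample string is arbitrary):

package main

import "fmt"

const primeRK = 16777619

// hashAndPow mirrors hashStr above: it returns the hash of b and
// primeRK**len(b) computed by repeated squaring, all modulo 2**32.
func hashAndPow(b []byte) (uint32, uint32) {
	var h uint32
	for _, c := range b {
		h = h*primeRK + uint32(c)
	}
	var pow, sq uint32 = 1, primeRK
	for i := len(b); i > 0; i >>= 1 {
		if i&1 != 0 {
			pow *= sq
		}
		sq *= sq
	}
	return h, pow
}

func main() {
	s := []byte("abracadabra")
	n := 3
	want, pow := hashAndPow(s[len(s)-n:]) // hash of the final window
	h, _ := hashAndPow(s[:n])             // hash of the first window
	for i := n; i < len(s); i++ {
		// Same update as the Index loop: drop s[i-n], append s[i].
		h = h*primeRK + uint32(s[i]) - pow*uint32(s[i-n])
	}
	fmt.Println(h == want) // true
}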

View file

@ -0,0 +1,41 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// -build !amd64,!s390x
package bytes
// TODO: implements short string optimization on non amd64 platforms
// and get rid of bytes_amd64.go
// Index returns the index of the first instance of sep in s, or -1 if sep is not present in s.
func Index(s, sep []byte) int {
n := len(sep)
if n == 0 {
return 0
}
if n > len(s) {
return -1
}
c := sep[0]
if n == 1 {
return IndexByte(s, c)
}
i := 0
t := s[:len(s)-n+1]
for i < len(t) {
if t[i] != c {
o := IndexByte(t[i:], c)
if o < 0 {
break
}
i += o
}
if Equal(s[i:i+n], sep) {
return i
}
i++
}
return -1
}

View file

@ -0,0 +1,120 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
package bytes
//go:noescape
// indexShortStr returns the index of the first instance of sep in s,
// or -1 if sep is not present in s.
// indexShortStr requires 2 <= len(sep) <= shortStringLen
func indexShortStr(s, c []byte) int // ../runtime/asm_s390x.s
// supportsVX reports whether the vector facility is available.
// indexShortStr must not be called if the vector facility is not
// available.
func supportsVX() bool // ../runtime/asm_s390x.s
var shortStringLen = -1
func init() {
if supportsVX() {
shortStringLen = 64
}
}
// Index returns the index of the first instance of sep in s, or -1 if sep is not present in s.
func Index(s, sep []byte) int {
n := len(sep)
switch {
case n == 0:
return 0
case n == 1:
return IndexByte(s, sep[0])
case n == len(s):
if Equal(sep, s) {
return 0
}
return -1
case n > len(s):
return -1
case n <= shortStringLen:
// Use brute force when s and sep both are small
if len(s) <= 64 {
return indexShortStr(s, sep)
}
c := sep[0]
i := 0
t := s[:len(s)-n+1]
fails := 0
for i < len(t) {
if t[i] != c {
// IndexByte skips 16/32 bytes per iteration,
// so it's faster than indexShortStr.
o := IndexByte(t[i:], c)
if o < 0 {
return -1
}
i += o
}
if Equal(s[i:i+n], sep) {
return i
}
fails++
i++
// Switch to indexShortStr when IndexByte produces too many false positives.
// Too many means more than 1 error per 8 characters.
// Allow some errors in the beginning.
if fails > (i+16)/8 {
r := indexShortStr(s[i:], sep)
if r >= 0 {
return r + i
}
return -1
}
}
return -1
}
// Rabin-Karp search
hashsep, pow := hashStr(sep)
var h uint32
for i := 0; i < n; i++ {
h = h*primeRK + uint32(s[i])
}
if h == hashsep && Equal(s[:n], sep) {
return 0
}
for i := n; i < len(s); {
h *= primeRK
h += uint32(s[i])
h -= pow * uint32(s[i-n])
i++
if h == hashsep && Equal(s[i-n:i], sep) {
return i - n
}
}
return -1
}
// primeRK is the prime base used in Rabin-Karp algorithm.
const primeRK = 16777619
// hashStr returns the hash and the appropriate multiplicative
// factor for use in Rabin-Karp algorithm.
func hashStr(sep []byte) (uint32, uint32) {
hash := uint32(0)
for i := 0; i < len(sep); i++ {
hash = hash*primeRK + uint32(sep[i])
}
var pow, sq uint32 = 1, primeRK
for i := len(sep); i > 0; i >>= 1 {
if i&1 != 0 {
pow *= sq
}
sq *= sq
}
return hash, pow
}

View file

@ -7,8 +7,10 @@ package bytes_test
import ( import (
. "bytes" . "bytes"
"fmt" "fmt"
"internal/testenv"
"math/rand" "math/rand"
"reflect" "reflect"
"strings"
"testing" "testing"
"unicode" "unicode"
"unicode/utf8" "unicode/utf8"
@ -165,8 +167,12 @@ var indexAnyTests = []BinOpTest{
{"abc", "xyz", -1}, {"abc", "xyz", -1},
{"abc", "xcz", 2}, {"abc", "xcz", 2},
{"ab☺c", "x☺yz", 2}, {"ab☺c", "x☺yz", 2},
{"a☺b☻c☹d", "cx", len("a☺b☻")},
{"a☺b☻c☹d", "uvw☻xyz", len("a☺b")},
{"aRegExp*", ".(|)*+?^$[]", 7}, {"aRegExp*", ".(|)*+?^$[]", 7},
{dots + dots + dots, " ", -1}, {dots + dots + dots, " ", -1},
{"012abcba210", "\xffb", 4},
{"012\x80bcb\x80210", "\xffb", 3},
} }
var lastIndexAnyTests = []BinOpTest{ var lastIndexAnyTests = []BinOpTest{
@ -178,18 +184,13 @@ var lastIndexAnyTests = []BinOpTest{
{"aaa", "a", 2}, {"aaa", "a", 2},
{"abc", "xyz", -1}, {"abc", "xyz", -1},
{"abc", "ab", 1}, {"abc", "ab", 1},
{"a☺b☻c☹d", "uvw☻xyz", 2 + len("☺")}, {"ab☺c", "x☺yz", 2},
{"a☺b☻c☹d", "cx", len("a☺b☻")},
{"a☺b☻c☹d", "uvw☻xyz", len("a☺b")},
{"a.RegExp*", ".(|)*+?^$[]", 8}, {"a.RegExp*", ".(|)*+?^$[]", 8},
{dots + dots + dots, " ", -1}, {dots + dots + dots, " ", -1},
} {"012abcba210", "\xffb", 6},
{"012\x80bcb\x80210", "\xffb", 7},
var indexRuneTests = []BinOpTest{
{"", "a", -1},
{"", "☺", -1},
{"foo", "☹", -1},
{"foo", "o", 1},
{"foo☺bar", "☺", 3},
{"foo☺☻☹bar", "☹", 9},
} }
// Execute f on each test case. funcName should be the name of f; it's used // Execute f on each test case. funcName should be the name of f; it's used
@ -346,14 +347,53 @@ func TestIndexByteSmall(t *testing.T) {
} }
func TestIndexRune(t *testing.T) { func TestIndexRune(t *testing.T) {
for _, tt := range indexRuneTests { tests := []struct {
a := []byte(tt.a) in string
r, _ := utf8.DecodeRuneInString(tt.b) rune rune
pos := IndexRune(a, r) want int
if pos != tt.i { }{
t.Errorf(`IndexRune(%q, '%c') = %v`, tt.a, r, pos) {"", 'a', -1},
{"", '☺', -1},
{"foo", '☹', -1},
{"foo", 'o', 1},
{"foo☺bar", '☺', 3},
{"foo☺☻☹bar", '☹', 9},
{"a A x", 'A', 2},
{"some_text=some_value", '=', 9},
{"☺a", 'a', 3},
{"a☻☺b", '☺', 4},
// RuneError should match any invalid UTF-8 byte sequence.
{"<22>", '<27>', 0},
{"\xff", '<27>', 0},
{"☻x<E298BB>", '<27>', len("☻x")},
{"☻x\xe2\x98", '<27>', len("☻x")},
{"☻x\xe2\x98<39>", '<27>', len("☻x")},
{"☻x\xe2\x98x", '<27>', len("☻x")},
// Invalid rune values should never match.
{"a☺b☻c☹d\xe2\x98<39>\xff<66>\xed\xa0\x80", -1, -1},
{"a☺b☻c☹d\xe2\x98<39>\xff<66>\xed\xa0\x80", 0xD800, -1}, // Surrogate pair
{"a☺b☻c☹d\xe2\x98<39>\xff<66>\xed\xa0\x80", utf8.MaxRune + 1, -1},
}
for _, tt := range tests {
if got := IndexRune([]byte(tt.in), tt.rune); got != tt.want {
t.Errorf("IndexRune(%q, %d) = %v; want %v", tt.in, tt.rune, got, tt.want)
} }
} }
haystack := []byte("test世界")
allocs := testing.AllocsPerRun(1000, func() {
if i := IndexRune(haystack, 's'); i != 2 {
t.Fatalf("'s' at %d; want 2", i)
}
if i := IndexRune(haystack, '世'); i != 4 {
t.Fatalf("'世' at %d; want 4", i)
}
})
if allocs != 0 {
t.Errorf("expected no allocations, got %f", allocs)
}
} }
var bmbuf []byte var bmbuf []byte
@ -370,6 +410,9 @@ func valName(x int) string {
func benchBytes(b *testing.B, sizes []int, f func(b *testing.B, n int)) { func benchBytes(b *testing.B, sizes []int, f func(b *testing.B, n int)) {
for _, n := range sizes { for _, n := range sizes {
if isRaceBuilder && n > 4<<10 {
continue
}
b.Run(valName(n), func(b *testing.B) { b.Run(valName(n), func(b *testing.B) {
if len(bmbuf) < n { if len(bmbuf) < n {
bmbuf = make([]byte, n) bmbuf = make([]byte, n)
@ -382,6 +425,8 @@ func benchBytes(b *testing.B, sizes []int, f func(b *testing.B, n int)) {
var indexSizes = []int{10, 32, 4 << 10, 4 << 20, 64 << 20} var indexSizes = []int{10, 32, 4 << 10, 4 << 20, 64 << 20}
var isRaceBuilder = strings.HasSuffix(testenv.Builder(), "-race")
func BenchmarkIndexByte(b *testing.B) { func BenchmarkIndexByte(b *testing.B) {
benchBytes(b, indexSizes, bmIndexByte(IndexByte)) benchBytes(b, indexSizes, bmIndexByte(IndexByte))
} }
@ -404,6 +449,44 @@ func bmIndexByte(index func([]byte, byte) int) func(b *testing.B, n int) {
} }
} }
func BenchmarkIndexRune(b *testing.B) {
benchBytes(b, indexSizes, bmIndexRune(IndexRune))
}
func BenchmarkIndexRuneASCII(b *testing.B) {
benchBytes(b, indexSizes, bmIndexRuneASCII(IndexRune))
}
func bmIndexRuneASCII(index func([]byte, rune) int) func(b *testing.B, n int) {
return func(b *testing.B, n int) {
buf := bmbuf[0:n]
buf[n-1] = 'x'
for i := 0; i < b.N; i++ {
j := index(buf, 'x')
if j != n-1 {
b.Fatal("bad index", j)
}
}
buf[n-1] = '\x00'
}
}
func bmIndexRune(index func([]byte, rune) int) func(b *testing.B, n int) {
return func(b *testing.B, n int) {
buf := bmbuf[0:n]
utf8.EncodeRune(buf[n-3:], '世')
for i := 0; i < b.N; i++ {
j := index(buf, '世')
if j != n-3 {
b.Fatal("bad index", j)
}
}
buf[n-3] = '\x00'
buf[n-2] = '\x00'
buf[n-1] = '\x00'
}
}
func BenchmarkEqual(b *testing.B) { func BenchmarkEqual(b *testing.B) {
b.Run("0", func(b *testing.B) { b.Run("0", func(b *testing.B) {
var buf [4]byte var buf [4]byte
@ -844,6 +927,54 @@ func TestRepeat(t *testing.T) {
} }
} }
func repeat(b []byte, count int) (err error) {
defer func() {
if r := recover(); r != nil {
switch v := r.(type) {
case error:
err = v
default:
err = fmt.Errorf("%s", v)
}
}
}()
Repeat(b, count)
return
}
// See Issue golang.org/issue/16237
func TestRepeatCatchesOverflow(t *testing.T) {
tests := [...]struct {
s string
count int
errStr string
}{
0: {"--", -2147483647, "negative"},
1: {"", int(^uint(0) >> 1), ""},
2: {"-", 10, ""},
3: {"gopher", 0, ""},
4: {"-", -1, "negative"},
5: {"--", -102, "negative"},
6: {string(make([]byte, 255)), int((^uint(0))/255 + 1), "overflow"},
}
for i, tt := range tests {
err := repeat([]byte(tt.s), tt.count)
if tt.errStr == "" {
if err != nil {
t.Errorf("#%d panicked %v", i, err)
}
continue
}
if err == nil || !strings.Contains(err.Error(), tt.errStr) {
t.Errorf("#%d expected %q got %q", i, tt.errStr, err)
}
}
}
func runesEqual(a, b []rune) bool { func runesEqual(a, b []rune) bool {
if len(a) != len(b) { if len(a) != len(b) {
return false return false
@ -906,6 +1037,9 @@ var trimTests = []TrimTest{
{"Trim", "* listitem", " *", "listitem"}, {"Trim", "* listitem", " *", "listitem"},
{"Trim", `"quote"`, `"`, "quote"}, {"Trim", `"quote"`, `"`, "quote"},
{"Trim", "\u2C6F\u2C6F\u0250\u0250\u2C6F\u2C6F", "\u2C6F", "\u0250\u0250"}, {"Trim", "\u2C6F\u2C6F\u0250\u0250\u2C6F\u2C6F", "\u2C6F", "\u0250\u0250"},
{"Trim", "\x80test\xff", "\xff", "test"},
{"Trim", " Ġ ", " ", "Ġ"},
{"Trim", " Ġİ0", "0 ", "Ġİ"},
//empty string tests //empty string tests
{"Trim", "abba", "", "abba"}, {"Trim", "abba", "", "abba"},
{"Trim", "", "123", ""}, {"Trim", "", "123", ""},
@ -1325,3 +1459,31 @@ func BenchmarkBytesCompare(b *testing.B) {
}) })
} }
} }
func BenchmarkIndexAnyASCII(b *testing.B) {
x := Repeat([]byte{'#'}, 4096) // Never matches set
cs := "0123456789abcdef"
for k := 1; k <= 4096; k <<= 4 {
for j := 1; j <= 16; j <<= 1 {
b.Run(fmt.Sprintf("%d:%d", k, j), func(b *testing.B) {
for i := 0; i < b.N; i++ {
IndexAny(x[:k], cs[:j])
}
})
}
}
}
func BenchmarkTrimASCII(b *testing.B) {
cs := "0123456789abcdef"
for k := 1; k <= 4096; k <<= 4 {
for j := 1; j <= 16; j <<= 1 {
b.Run(fmt.Sprintf("%d:%d", k, j), func(b *testing.B) {
x := Repeat([]byte(cs[:j]), k) // Always matches set
for i := 0; i < b.N; i++ {
Trim(x[:k], cs[:j])
}
})
}
}
}

View file

@ -11,6 +11,7 @@ import (
"io" "io"
"os" "os"
"sort" "sort"
"unicode"
) )
func ExampleBuffer() { func ExampleBuffer() {
@ -83,3 +84,205 @@ func ExampleTrimPrefix() {
fmt.Printf("Hello%s", b) fmt.Printf("Hello%s", b)
// Output: Hello, world! // Output: Hello, world!
} }
func ExampleFields() {
fmt.Printf("Fields are: %q", bytes.Fields([]byte(" foo bar baz ")))
// Output: Fields are: ["foo" "bar" "baz"]
}
func ExampleFieldsFunc() {
f := func(c rune) bool {
return !unicode.IsLetter(c) && !unicode.IsNumber(c)
}
fmt.Printf("Fields are: %q", bytes.FieldsFunc([]byte(" foo1;bar2,baz3..."), f))
// Output: Fields are: ["foo1" "bar2" "baz3"]
}
func ExampleContains() {
fmt.Println(bytes.Contains([]byte("seafood"), []byte("foo")))
fmt.Println(bytes.Contains([]byte("seafood"), []byte("bar")))
fmt.Println(bytes.Contains([]byte("seafood"), []byte("")))
fmt.Println(bytes.Contains([]byte(""), []byte("")))
// Output:
// true
// false
// true
// true
}
func ExampleCount() {
fmt.Println(bytes.Count([]byte("cheese"), []byte("e")))
fmt.Println(bytes.Count([]byte("five"), []byte(""))) // before & after each rune
// Output:
// 3
// 5
}
func ExampleEqualFold() {
fmt.Println(bytes.EqualFold([]byte("Go"), []byte("go")))
// Output: true
}
func ExampleHasPrefix() {
fmt.Println(bytes.HasPrefix([]byte("Gopher"), []byte("Go")))
fmt.Println(bytes.HasPrefix([]byte("Gopher"), []byte("C")))
fmt.Println(bytes.HasPrefix([]byte("Gopher"), []byte("")))
// Output:
// true
// false
// true
}
func ExampleHasSuffix() {
fmt.Println(bytes.HasSuffix([]byte("Amigo"), []byte("go")))
fmt.Println(bytes.HasSuffix([]byte("Amigo"), []byte("O")))
fmt.Println(bytes.HasSuffix([]byte("Amigo"), []byte("Ami")))
fmt.Println(bytes.HasSuffix([]byte("Amigo"), []byte("")))
// Output:
// true
// false
// false
// true
}
func ExampleIndex() {
fmt.Println(bytes.Index([]byte("chicken"), []byte("ken")))
fmt.Println(bytes.Index([]byte("chicken"), []byte("dmr")))
// Output:
// 4
// -1
}
func ExampleIndexFunc() {
f := func(c rune) bool {
return unicode.Is(unicode.Han, c)
}
fmt.Println(bytes.IndexFunc([]byte("Hello, 世界"), f))
fmt.Println(bytes.IndexFunc([]byte("Hello, world"), f))
// Output:
// 7
// -1
}
func ExampleIndexAny() {
fmt.Println(bytes.IndexAny([]byte("chicken"), "aeiouy"))
fmt.Println(bytes.IndexAny([]byte("crwth"), "aeiouy"))
// Output:
// 2
// -1
}
func ExampleIndexRune() {
fmt.Println(bytes.IndexRune([]byte("chicken"), 'k'))
fmt.Println(bytes.IndexRune([]byte("chicken"), 'd'))
// Output:
// 4
// -1
}
func ExampleLastIndex() {
fmt.Println(bytes.Index([]byte("go gopher"), []byte("go")))
fmt.Println(bytes.LastIndex([]byte("go gopher"), []byte("go")))
fmt.Println(bytes.LastIndex([]byte("go gopher"), []byte("rodent")))
// Output:
// 0
// 3
// -1
}
func ExampleJoin() {
s := [][]byte{[]byte("foo"), []byte("bar"), []byte("baz")}
fmt.Printf("%s", bytes.Join(s, []byte(", ")))
// Output: foo, bar, baz
}
func ExampleRepeat() {
fmt.Printf("ba%s", bytes.Repeat([]byte("na"), 2))
// Output: banana
}
func ExampleReplace() {
fmt.Printf("%s\n", bytes.Replace([]byte("oink oink oink"), []byte("k"), []byte("ky"), 2))
fmt.Printf("%s\n", bytes.Replace([]byte("oink oink oink"), []byte("oink"), []byte("moo"), -1))
// Output:
// oinky oinky oink
// moo moo moo
}
func ExampleSplit() {
fmt.Printf("%q\n", bytes.Split([]byte("a,b,c"), []byte(",")))
fmt.Printf("%q\n", bytes.Split([]byte("a man a plan a canal panama"), []byte("a ")))
fmt.Printf("%q\n", bytes.Split([]byte(" xyz "), []byte("")))
fmt.Printf("%q\n", bytes.Split([]byte(""), []byte("Bernardo O'Higgins")))
// Output:
// ["a" "b" "c"]
// ["" "man " "plan " "canal panama"]
// [" " "x" "y" "z" " "]
// [""]
}
func ExampleSplitN() {
fmt.Printf("%q\n", bytes.SplitN([]byte("a,b,c"), []byte(","), 2))
z := bytes.SplitN([]byte("a,b,c"), []byte(","), 0)
fmt.Printf("%q (nil = %v)\n", z, z == nil)
// Output:
// ["a" "b,c"]
// [] (nil = true)
}
func ExampleSplitAfter() {
fmt.Printf("%q\n", bytes.SplitAfter([]byte("a,b,c"), []byte(",")))
// Output: ["a," "b," "c"]
}
func ExampleSplitAfterN() {
fmt.Printf("%q\n", bytes.SplitAfterN([]byte("a,b,c"), []byte(","), 2))
// Output: ["a," "b,c"]
}
func ExampleTitle() {
fmt.Printf("%s", bytes.Title([]byte("her royal highness")))
// Output: Her Royal Highness
}
func ExampleToTitle() {
fmt.Printf("%s\n", bytes.ToTitle([]byte("loud noises")))
fmt.Printf("%s\n", bytes.ToTitle([]byte("хлеб")))
// Output:
// LOUD NOISES
// ХЛЕБ
}
func ExampleTrim() {
fmt.Printf("[%q]", bytes.Trim([]byte(" !!! Achtung! Achtung! !!! "), "! "))
// Output: ["Achtung! Achtung"]
}
func ExampleMap() {
rot13 := func(r rune) rune {
switch {
case r >= 'A' && r <= 'Z':
return 'A' + (r-'A'+13)%26
case r >= 'a' && r <= 'z':
return 'a' + (r-'a'+13)%26
}
return r
}
fmt.Printf("%s", bytes.Map(rot13, []byte("'Twas brillig and the slithy gopher...")))
// Output: 'Gjnf oevyyvt naq gur fyvgul tbcure...
}
func ExampleTrimSpace() {
fmt.Printf("%s", bytes.TrimSpace([]byte(" \t\n a lone gopher \n\t\r\n")))
// Output: a lone gopher
}
func ExampleToUpper() {
fmt.Printf("%s", bytes.ToUpper([]byte("Gopher")))
// Output: GOPHER
}
func ExampleToLower() {
fmt.Printf("%s", bytes.ToLower([]byte("Gopher")))
// Output: gopher
}

View file

@ -87,6 +87,7 @@ func (f *File) ReadGo(name string) {
if cg != nil { if cg != nil {
f.Preamble += fmt.Sprintf("#line %d %q\n", sourceLine(cg), name) f.Preamble += fmt.Sprintf("#line %d %q\n", sourceLine(cg), name)
f.Preamble += commentText(cg) + "\n" f.Preamble += commentText(cg) + "\n"
f.Preamble += "#line 1 \"cgo-generated-wrapper\"\n"
} }
} }
} }
@ -296,7 +297,7 @@ func (f *File) walk(x interface{}, context string, visit func(*File, interface{}
// everything else just recurs // everything else just recurs
default: default:
error_(token.NoPos, "unexpected type %T in walk", x, visit) error_(token.NoPos, "unexpected type %T in walk", x)
panic("unexpected type") panic("unexpected type")
case nil: case nil:

View file

@ -53,6 +53,8 @@ For example:
// #include <png.h> // #include <png.h>
import "C" import "C"
The default pkg-config tool may be changed by setting the PKG_CONFIG environment variable.
When building, the CGO_CFLAGS, CGO_CPPFLAGS, CGO_CXXFLAGS, CGO_FFLAGS and When building, the CGO_CFLAGS, CGO_CPPFLAGS, CGO_CXXFLAGS, CGO_FFLAGS and
CGO_LDFLAGS environment variables are added to the flags derived from CGO_LDFLAGS environment variables are added to the flags derived from
these directives. Package-specific flags should be set using the these directives. Package-specific flags should be set using the
@ -214,6 +216,13 @@ by making copies of the data. In pseudo-Go definitions:
// C data with explicit length to Go []byte // C data with explicit length to Go []byte
func C.GoBytes(unsafe.Pointer, C.int) []byte func C.GoBytes(unsafe.Pointer, C.int) []byte
As a special case, C.malloc does not call the C library malloc directly
but instead calls a Go helper function that wraps the C library malloc
but guarantees never to return nil. If C's malloc indicates out of memory,
the helper function crashes the program, like when Go itself runs out
of memory. Because C.malloc cannot fail, it has no two-result form
that returns errno.
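
A minimal cgo sketch of the behavior described above (illustrative only; the 64-byte allocation size is an arbitrary assumption): only the single-result form exists, and a nil check is never needed.

package main

// #include <stdlib.h>
import "C"

func main() {
	p := C.malloc(C.size_t(64)) // never nil: the wrapper aborts the program on OOM
	defer C.free(p)
	buf := (*[64]byte)(p)[:] // view the C allocation as a Go slice
	buf[0] = 'x'
}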
C references to Go C references to Go
Go functions can be exported for use by C code in the following way: Go functions can be exported for use by C code in the following way:
@ -317,6 +326,9 @@ The following options are available when running cgo directly:
Write out input file in Go syntax replacing C package Write out input file in Go syntax replacing C package
names with real values. Used to generate files in the names with real values. Used to generate files in the
syscall package when bootstrapping a new target. syscall package when bootstrapping a new target.
-srcdir directory
Find the Go input files, listed on the command line,
in directory.
-objdir directory -objdir directory
Put all generated files in directory. Put all generated files in directory.
-importpath string -importpath string

View file

@ -167,7 +167,23 @@ func (p *Package) Translate(f *File) {
if len(needType) > 0 { if len(needType) > 0 {
p.loadDWARF(f, needType) p.loadDWARF(f, needType)
} }
p.rewriteCalls(f) if p.rewriteCalls(f) {
// Add `import _cgo_unsafe "unsafe"` as the first decl
// after the package statement.
imp := &ast.GenDecl{
Tok: token.IMPORT,
Specs: []ast.Spec{
&ast.ImportSpec{
Name: ast.NewIdent("_cgo_unsafe"),
Path: &ast.BasicLit{
Kind: token.STRING,
Value: `"unsafe"`,
},
},
},
}
f.AST.Decls = append([]ast.Decl{imp}, f.AST.Decls...)
}
p.rewriteRef(f) p.rewriteRef(f)
} }
@ -413,6 +429,7 @@ func (p *Package) loadDWARF(f *File, names []*Name) {
var b bytes.Buffer var b bytes.Buffer
b.WriteString(f.Preamble) b.WriteString(f.Preamble)
b.WriteString(builtinProlog) b.WriteString(builtinProlog)
b.WriteString("#line 1 \"cgo-dwarf-inference\"\n")
for i, n := range names { for i, n := range names {
fmt.Fprintf(&b, "__typeof__(%s) *__cgo__%d;\n", n.C, i) fmt.Fprintf(&b, "__typeof__(%s) *__cgo__%d;\n", n.C, i)
if n.Kind == "const" { if n.Kind == "const" {
@ -578,7 +595,9 @@ func (p *Package) mangleName(n *Name) {
// rewriteCalls rewrites all calls that pass pointers to check that // rewriteCalls rewrites all calls that pass pointers to check that
// they follow the rules for passing pointers between Go and C. // they follow the rules for passing pointers between Go and C.
func (p *Package) rewriteCalls(f *File) { // This returns whether the package needs to import unsafe as _cgo_unsafe.
func (p *Package) rewriteCalls(f *File) bool {
needsUnsafe := false
for _, call := range f.Calls { for _, call := range f.Calls {
// This is a call to C.xxx; set goname to "xxx". // This is a call to C.xxx; set goname to "xxx".
goname := call.Call.Fun.(*ast.SelectorExpr).Sel.Name goname := call.Call.Fun.(*ast.SelectorExpr).Sel.Name
@ -590,18 +609,24 @@ func (p *Package) rewriteCalls(f *File) {
// Probably a type conversion. // Probably a type conversion.
continue continue
} }
p.rewriteCall(f, call, name) if p.rewriteCall(f, call, name) {
needsUnsafe = true
}
} }
return needsUnsafe
} }
// rewriteCall rewrites one call to add pointer checks. We replace // rewriteCall rewrites one call to add pointer checks.
// each pointer argument x with _cgoCheckPointer(x).(T). // If any pointer checks are required, we rewrite the call into a
func (p *Package) rewriteCall(f *File, call *Call, name *Name) { // function literal that calls _cgoCheckPointer for each pointer
// argument and then calls the original function.
// This returns whether the package needs to import unsafe as _cgo_unsafe.
func (p *Package) rewriteCall(f *File, call *Call, name *Name) bool {
// Avoid a crash if the number of arguments is // Avoid a crash if the number of arguments is
// less than the number of parameters. // less than the number of parameters.
// This will be caught when the generated file is compiled. // This will be caught when the generated file is compiled.
if len(call.Call.Args) < len(name.FuncType.Params) { if len(call.Call.Args) < len(name.FuncType.Params) {
return return false
} }
any := false any := false
@ -612,38 +637,60 @@ func (p *Package) rewriteCall(f *File, call *Call, name *Name) {
} }
} }
if !any { if !any {
return return false
} }
// We need to rewrite this call. // We need to rewrite this call.
// //
// We are going to rewrite C.f(p) to C.f(_cgoCheckPointer(p)). // We are going to rewrite C.f(p) to
// If the call to C.f is deferred, that will check p at the // func (_cgo0 ptype) {
// point of the defer statement, not when the function is called, so // _cgoCheckPointer(_cgo0)
// rewrite to func(_cgo0 ptype) { C.f(_cgoCheckPointer(_cgo0)) }(p) // C.f(_cgo0)
// }(p)
var dargs []ast.Expr // Using a function literal like this lets us do correct
if call.Deferred { // argument type checking, and works correctly if the call is
dargs = make([]ast.Expr, len(name.FuncType.Params)) // deferred.
} needsUnsafe := false
params := make([]*ast.Field, len(name.FuncType.Params))
nargs := make([]ast.Expr, len(name.FuncType.Params))
var stmts []ast.Stmt
for i, param := range name.FuncType.Params { for i, param := range name.FuncType.Params {
origArg := call.Call.Args[i] // params is going to become the parameters of the
darg := origArg // function literal.
// nargs is going to become the list of arguments made
// by the call within the function literal.
// nparam is the parameter of the function literal that
// corresponds to param.
if call.Deferred { origArg := call.Call.Args[i]
dargs[i] = darg nparam := ast.NewIdent(fmt.Sprintf("_cgo%d", i))
darg = ast.NewIdent(fmt.Sprintf("_cgo%d", i)) nargs[i] = nparam
call.Call.Args[i] = darg
// The Go version of the C type might use unsafe.Pointer,
// but the file might not import unsafe.
// Rewrite the Go type if necessary to use _cgo_unsafe.
ptype := p.rewriteUnsafe(param.Go)
if ptype != param.Go {
needsUnsafe = true
}
params[i] = &ast.Field{
Names: []*ast.Ident{nparam},
Type: ptype,
} }
if !p.needsPointerCheck(f, param.Go, origArg) { if !p.needsPointerCheck(f, param.Go, origArg) {
continue continue
} }
// Run the cgo pointer checks on nparam.
// Change the function literal to call the real function
// with the parameter passed through _cgoCheckPointer.
c := &ast.CallExpr{ c := &ast.CallExpr{
Fun: ast.NewIdent("_cgoCheckPointer"), Fun: ast.NewIdent("_cgoCheckPointer"),
Args: []ast.Expr{ Args: []ast.Expr{
darg, nparam,
}, },
} }
@ -651,95 +698,83 @@ func (p *Package) rewriteCall(f *File, call *Call, name *Name) {
// expression. // expression.
c.Args = p.checkAddrArgs(f, c.Args, origArg) c.Args = p.checkAddrArgs(f, c.Args, origArg)
// _cgoCheckPointer returns interface{}. stmt := &ast.ExprStmt{
// We need to type assert that to the type we want. X: c,
// If the Go version of this C type uses
// unsafe.Pointer, we can't use a type assertion,
// because the Go file might not import unsafe.
// Instead we use a local variant of _cgoCheckPointer.
var arg ast.Expr
if n := p.unsafeCheckPointerName(param.Go, call.Deferred); n != "" {
c.Fun = ast.NewIdent(n)
arg = c
} else {
// In order for the type assertion to succeed,
// we need it to match the actual type of the
// argument. The only type we have is the
// type of the function parameter. We know
// that the argument type must be assignable
// to the function parameter type, or the code
// would not compile, but there is nothing
// requiring that the types be exactly the
// same. Add a type conversion to the
// argument so that the type assertion will
// succeed.
c.Args[0] = &ast.CallExpr{
Fun: param.Go,
Args: []ast.Expr{
c.Args[0],
},
}
arg = &ast.TypeAssertExpr{
X: c,
Type: param.Go,
}
} }
stmts = append(stmts, stmt)
call.Call.Args[i] = arg
} }
if call.Deferred { fcall := &ast.CallExpr{
params := make([]*ast.Field, len(name.FuncType.Params)) Fun: call.Call.Fun,
for i, param := range name.FuncType.Params { Args: nargs,
ptype := param.Go }
if p.hasUnsafePointer(ptype) { ftype := &ast.FuncType{
// Avoid generating unsafe.Pointer by using Params: &ast.FieldList{
// interface{}. This works because we are List: params,
// going to call a _cgoCheckPointer function },
// anyhow. }
ptype = &ast.InterfaceType{ if name.FuncType.Result != nil {
Methods: &ast.FieldList{}, rtype := p.rewriteUnsafe(name.FuncType.Result.Go)
if rtype != name.FuncType.Result.Go {
needsUnsafe = true
}
ftype.Results = &ast.FieldList{
List: []*ast.Field{
&ast.Field{
Type: rtype,
},
},
}
}
// There is a Ref pointing to the old call.Call.Fun.
for _, ref := range f.Ref {
if ref.Expr == &call.Call.Fun {
ref.Expr = &fcall.Fun
// If this call expects two results, we have to
// adjust the results of the function we generated.
if ref.Context == "call2" {
if ftype.Results == nil {
// An explicit void argument
// looks odd but it seems to
// be how cgo has worked historically.
ftype.Results = &ast.FieldList{
List: []*ast.Field{
&ast.Field{
Type: ast.NewIdent("_Ctype_void"),
},
},
}
} }
} ftype.Results.List = append(ftype.Results.List,
params[i] = &ast.Field{ &ast.Field{
Names: []*ast.Ident{ Type: ast.NewIdent("error"),
ast.NewIdent(fmt.Sprintf("_cgo%d", i)), })
},
Type: ptype,
}
}
dbody := &ast.CallExpr{
Fun: call.Call.Fun,
Args: call.Call.Args,
}
call.Call.Fun = &ast.FuncLit{
Type: &ast.FuncType{
Params: &ast.FieldList{
List: params,
},
},
Body: &ast.BlockStmt{
List: []ast.Stmt{
&ast.ExprStmt{
X: dbody,
},
},
},
}
call.Call.Args = dargs
call.Call.Lparen = token.NoPos
call.Call.Rparen = token.NoPos
// There is a Ref pointing to the old call.Call.Fun.
for _, ref := range f.Ref {
if ref.Expr == &call.Call.Fun {
ref.Expr = &dbody.Fun
} }
} }
} }
var fbody ast.Stmt
if ftype.Results == nil {
fbody = &ast.ExprStmt{
X: fcall,
}
} else {
fbody = &ast.ReturnStmt{
Results: []ast.Expr{fcall},
}
}
call.Call.Fun = &ast.FuncLit{
Type: ftype,
Body: &ast.BlockStmt{
List: append(stmts, fbody),
},
}
call.Call.Lparen = token.NoPos
call.Call.Rparen = token.NoPos
return needsUnsafe
} }
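
A self-contained sketch of why the rewritten call uses a function literal (check and cf are hypothetical stand-ins for _cgoCheckPointer and a C function; cgo does not generate this text verbatim): the literal's arguments are bound where the call or defer statement appears, while the checks in its body run only when the call actually executes, so deferred calls are checked at the right time.

package main

import "fmt"

// check stands in for _cgoCheckPointer: it validates an argument just
// before the (pretend) C call runs.
func check(p *int) {
	if p == nil {
		panic("nil pointer would be passed to C")
	}
}

// cf stands in for a C function such as C.f.
func cf(p *int) { fmt.Println("C.f got", *p) }

func main() {
	x := 42
	p := &x
	// Shape of the rewritten call: the argument binds to _cgo0 now,
	// but check runs only when the deferred literal is invoked.
	defer func(_cgo0 *int) {
		check(_cgo0)
		cf(_cgo0)
	}(p)
	fmt.Println("doing work before the deferred C call")
}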
// needsPointerCheck returns whether the type t needs a pointer check. // needsPointerCheck returns whether the type t needs a pointer check.
@ -782,6 +817,11 @@ func (p *Package) hasPointer(f *File, t ast.Expr, top bool) bool {
if !top { if !top {
return true return true
} }
// Check whether this is a pointer to a C union (or class)
// type that contains a pointer.
if unionWithPointer[t.X] {
return true
}
return p.hasPointer(f, t.X, false) return p.hasPointer(f, t.X, false)
case *ast.FuncType, *ast.InterfaceType, *ast.MapType, *ast.ChanType: case *ast.FuncType, *ast.InterfaceType, *ast.MapType, *ast.ChanType:
return true return true
@ -935,69 +975,52 @@ func (p *Package) isType(t ast.Expr) bool {
return false return false
} }
// unsafeCheckPointerName is given the Go version of a C type. If the // rewriteUnsafe returns a version of t with references to unsafe.Pointer
// type uses unsafe.Pointer, we arrange to build a version of // rewritten to use _cgo_unsafe.Pointer instead.
// _cgoCheckPointer that returns that type. This avoids using a type func (p *Package) rewriteUnsafe(t ast.Expr) ast.Expr {
// assertion to unsafe.Pointer in our copy of user code. We return
// the name of the _cgoCheckPointer function we are going to build, or
// the empty string if the type does not use unsafe.Pointer.
//
// The deferred parameter is true if this check is for the argument of
// a deferred function. In that case we need to use an empty interface
// as the argument type, because the deferred function we introduce in
// rewriteCall will use an empty interface type, and we can't add a
// type assertion. This is handled by keeping a separate list, and
// writing out the lists separately in writeDefs.
func (p *Package) unsafeCheckPointerName(t ast.Expr, deferred bool) string {
if !p.hasUnsafePointer(t) {
return ""
}
var buf bytes.Buffer
conf.Fprint(&buf, fset, t)
s := buf.String()
checks := &p.CgoChecks
if deferred {
checks = &p.DeferredCgoChecks
}
for i, t := range *checks {
if s == t {
return p.unsafeCheckPointerNameIndex(i, deferred)
}
}
*checks = append(*checks, s)
return p.unsafeCheckPointerNameIndex(len(*checks)-1, deferred)
}
// hasUnsafePointer returns whether the Go type t uses unsafe.Pointer.
// t is the Go version of a C type, so we don't need to handle every case.
// We only care about direct references, not references via typedefs.
func (p *Package) hasUnsafePointer(t ast.Expr) bool {
switch t := t.(type) { switch t := t.(type) {
case *ast.Ident: case *ast.Ident:
// We don't see a SelectorExpr for unsafe.Pointer; // We don't see a SelectorExpr for unsafe.Pointer;
// this is created by code in this file. // this is created by code in this file.
return t.Name == "unsafe.Pointer" if t.Name == "unsafe.Pointer" {
return ast.NewIdent("_cgo_unsafe.Pointer")
}
case *ast.ArrayType: case *ast.ArrayType:
return p.hasUnsafePointer(t.Elt) t1 := p.rewriteUnsafe(t.Elt)
if t1 != t.Elt {
r := *t
r.Elt = t1
return &r
}
case *ast.StructType: case *ast.StructType:
changed := false
fields := *t.Fields
fields.List = nil
for _, f := range t.Fields.List { for _, f := range t.Fields.List {
if p.hasUnsafePointer(f.Type) { ft := p.rewriteUnsafe(f.Type)
return true if ft == f.Type {
fields.List = append(fields.List, f)
} else {
fn := *f
fn.Type = ft
fields.List = append(fields.List, &fn)
changed = true
} }
} }
if changed {
r := *t
r.Fields = &fields
return &r
}
case *ast.StarExpr: // Pointer type. case *ast.StarExpr: // Pointer type.
return p.hasUnsafePointer(t.X) x1 := p.rewriteUnsafe(t.X)
if x1 != t.X {
r := *t
r.X = x1
return &r
}
} }
return false return t
}
// unsafeCheckPointerNameIndex returns the name to use for a
// _cgoCheckPointer variant based on the index in the CgoChecks slice.
func (p *Package) unsafeCheckPointerNameIndex(i int, deferred bool) string {
if deferred {
return fmt.Sprintf("_cgoCheckPointerInDefer%d", i)
}
return fmt.Sprintf("_cgoCheckPointer%d", i)
} }
// rewriteRef rewrites all the C.xxx references in f.AST to refer to the // rewriteRef rewrites all the C.xxx references in f.AST to refer to the
@ -1187,6 +1210,8 @@ func (p *Package) gccMachine() []string {
return []string{"-m64"} return []string{"-m64"}
case "mips64", "mips64le": case "mips64", "mips64le":
return []string{"-mabi=64"} return []string{"-mabi=64"}
case "mips", "mipsle":
return []string{"-mabi=32"}
} }
return nil return nil
} }
@ -1415,6 +1440,10 @@ var tagGen int
var typedef = make(map[string]*Type) var typedef = make(map[string]*Type)
var goIdent = make(map[string]*ast.Ident) var goIdent = make(map[string]*ast.Ident)
// unionWithPointer is true for a Go type that represents a C union (or class)
// that may contain a pointer. This is used for cgo pointer checking.
var unionWithPointer = make(map[ast.Expr]bool)
func (c *typeConv) Init(ptrSize, intSize int64) { func (c *typeConv) Init(ptrSize, intSize int64) {
c.ptrSize = ptrSize c.ptrSize = ptrSize
c.intSize = intSize c.intSize = intSize
@ -1464,6 +1493,19 @@ func base(dt dwarf.Type) dwarf.Type {
return dt return dt
} }
// unqual strips away qualifiers from a DWARF type.
// In general we don't care about top-level qualifiers.
func unqual(dt dwarf.Type) dwarf.Type {
for {
if d, ok := dt.(*dwarf.QualType); ok {
dt = d.Type
} else {
break
}
}
return dt
}
// Map from dwarf text names to aliases we use in package "C". // Map from dwarf text names to aliases we use in package "C".
var dwarfToName = map[string]string{ var dwarfToName = map[string]string{
"long int": "long", "long int": "long",
@ -1641,7 +1683,7 @@ func (c *typeConv) Type(dtype dwarf.Type, pos token.Pos) *Type {
case 16: case 16:
t.Go = c.complex128 t.Go = c.complex128
} }
if t.Align = t.Size; t.Align >= c.ptrSize { if t.Align = t.Size / 2; t.Align >= c.ptrSize {
t.Align = c.ptrSize t.Align = c.ptrSize
} }
@ -1687,6 +1729,15 @@ func (c *typeConv) Type(dtype dwarf.Type, pos token.Pos) *Type {
if _, ok := base(dt.Type).(*dwarf.VoidType); ok { if _, ok := base(dt.Type).(*dwarf.VoidType); ok {
t.Go = c.goVoidPtr t.Go = c.goVoidPtr
t.C.Set("void*") t.C.Set("void*")
dq := dt.Type
for {
if d, ok := dq.(*dwarf.QualType); ok {
t.C.Set(d.Qual + " " + t.C.String())
dq = d.Type
} else {
break
}
}
break break
} }
@ -1699,9 +1750,16 @@ func (c *typeConv) Type(dtype dwarf.Type, pos token.Pos) *Type {
c.ptrs[dt.Type] = append(c.ptrs[dt.Type], t) c.ptrs[dt.Type] = append(c.ptrs[dt.Type], t)
case *dwarf.QualType: case *dwarf.QualType:
// Ignore qualifier. t1 := c.Type(dt.Type, pos)
t = c.Type(dt.Type, pos) t.Size = t1.Size
c.m[dtype] = t t.Align = t1.Align
t.Go = t1.Go
if unionWithPointer[t1.Go] {
unionWithPointer[t.Go] = true
}
t.EnumValues = nil
t.Typedef = ""
t.C.Set("%s "+dt.Qual, t1.C)
return t return t
case *dwarf.StructType: case *dwarf.StructType:
@ -1733,6 +1791,9 @@ func (c *typeConv) Type(dtype dwarf.Type, pos token.Pos) *Type {
switch dt.Kind { switch dt.Kind {
case "class", "union": case "class", "union":
t.Go = c.Opaque(t.Size) t.Go = c.Opaque(t.Size)
if c.dwarfHasPointer(dt, pos) {
unionWithPointer[t.Go] = true
}
if t.C.Empty() { if t.C.Empty() {
t.C.Set("__typeof__(unsigned char[%d])", t.Size) t.C.Set("__typeof__(unsigned char[%d])", t.Size)
} }
@ -1775,6 +1836,9 @@ func (c *typeConv) Type(dtype dwarf.Type, pos token.Pos) *Type {
goIdent[name.Name] = name goIdent[name.Name] = name
sub := c.Type(dt.Type, pos) sub := c.Type(dt.Type, pos)
t.Go = name t.Go = name
if unionWithPointer[sub.Go] {
unionWithPointer[t.Go] = true
}
t.Size = sub.Size t.Size = sub.Size
t.Align = sub.Align t.Align = sub.Align
oldType := typedef[name.Name] oldType := typedef[name.Name]
@ -1905,7 +1969,7 @@ func isStructUnionClass(x ast.Expr) bool {
// FuncArg returns a Go type with the same memory layout as // FuncArg returns a Go type with the same memory layout as
// dtype when used as the type of a C function argument. // dtype when used as the type of a C function argument.
func (c *typeConv) FuncArg(dtype dwarf.Type, pos token.Pos) *Type { func (c *typeConv) FuncArg(dtype dwarf.Type, pos token.Pos) *Type {
t := c.Type(dtype, pos) t := c.Type(unqual(dtype), pos)
switch dt := dtype.(type) { switch dt := dtype.(type) {
case *dwarf.ArrayType: case *dwarf.ArrayType:
// Arrays are passed implicitly as pointers in C. // Arrays are passed implicitly as pointers in C.
@ -1935,9 +1999,12 @@ func (c *typeConv) FuncArg(dtype dwarf.Type, pos token.Pos) *Type {
return nil return nil
} }
// Remember the C spelling, in case the struct // For a struct/union/class, remember the C spelling,
// has __attribute__((unavailable)) on it. See issue 2888. // in case it has __attribute__((unavailable)).
t.Typedef = dt.Name // See issue 2888.
if isStructUnionClass(t.Go) {
t.Typedef = dt.Name
}
} }
} }
return t return t
@ -1966,7 +2033,7 @@ func (c *typeConv) FuncType(dtype *dwarf.FuncType, pos token.Pos) *FuncType {
if _, ok := dtype.ReturnType.(*dwarf.VoidType); ok { if _, ok := dtype.ReturnType.(*dwarf.VoidType); ok {
gr = []*ast.Field{{Type: c.goVoid}} gr = []*ast.Field{{Type: c.goVoid}}
} else if dtype.ReturnType != nil { } else if dtype.ReturnType != nil {
r = c.Type(dtype.ReturnType, pos) r = c.Type(unqual(dtype.ReturnType), pos)
gr = []*ast.Field{{Type: r.Go}} gr = []*ast.Field{{Type: r.Go}}
} }
return &FuncType{ return &FuncType{
@ -2153,6 +2220,44 @@ func (c *typeConv) Struct(dt *dwarf.StructType, pos token.Pos) (expr *ast.Struct
return return
} }
// dwarfHasPointer returns whether the DWARF type dt contains a pointer.
func (c *typeConv) dwarfHasPointer(dt dwarf.Type, pos token.Pos) bool {
switch dt := dt.(type) {
default:
fatalf("%s: unexpected type: %s", lineno(pos), dt)
return false
case *dwarf.AddrType, *dwarf.BoolType, *dwarf.CharType, *dwarf.EnumType,
*dwarf.FloatType, *dwarf.ComplexType, *dwarf.FuncType,
*dwarf.IntType, *dwarf.UcharType, *dwarf.UintType, *dwarf.VoidType:
return false
case *dwarf.ArrayType:
return c.dwarfHasPointer(dt.Type, pos)
case *dwarf.PtrType:
return true
case *dwarf.QualType:
return c.dwarfHasPointer(dt.Type, pos)
case *dwarf.StructType:
for _, f := range dt.Field {
if c.dwarfHasPointer(f.Type, pos) {
return true
}
}
return false
case *dwarf.TypedefType:
if dt.Name == "_GoString_" || dt.Name == "_GoBytes_" {
return true
}
return c.dwarfHasPointer(dt.Type, pos)
}
}
func upper(s string) string { func upper(s string) string {
if s == "" { if s == "" {
return "" return ""

View file

@ -42,10 +42,6 @@ type Package struct {
GoFiles []string // list of Go files GoFiles []string // list of Go files
GccFiles []string // list of gcc output files GccFiles []string // list of gcc output files
Preamble string // collected preamble for _cgo_export.h Preamble string // collected preamble for _cgo_export.h
// See unsafeCheckPointerName.
CgoChecks []string
DeferredCgoChecks []string
} }
// A File collects information about a single Go input file. // A File collects information about a single Go input file.
@ -153,6 +149,8 @@ var ptrSizeMap = map[string]int64{
"mipsn32": 4, "mipsn32": 4,
"mipso64": 8, "mipso64": 8,
"mipsn64": 8, "mipsn64": 8,
"mips": 4,
"mipsle": 4,
"mips64": 8, "mips64": 8,
"mips64le": 8, "mips64le": 8,
"ppc": 4, "ppc": 4,
@ -175,6 +173,8 @@ var intSizeMap = map[string]int64{
"mipsn32": 4, "mipsn32": 4,
"mipso64": 8, "mipso64": 8,
"mipsn64": 8, "mipsn64": 8,
"mips": 4,
"mipsle": 4,
"mips64": 8, "mips64": 8,
"mips64le": 8, "mips64le": 8,
"ppc": 4, "ppc": 4,
@ -200,6 +200,7 @@ var dynlinker = flag.Bool("dynlinker", false, "record dynamic linker information
// constant values used in the host's C libraries and system calls. // constant values used in the host's C libraries and system calls.
var godefs = flag.Bool("godefs", false, "for bootstrap: write Go definitions for C file to standard output") var godefs = flag.Bool("godefs", false, "for bootstrap: write Go definitions for C file to standard output")
var srcDir = flag.String("srcdir", "", "source directory")
var objDir = flag.String("objdir", "", "object directory") var objDir = flag.String("objdir", "", "object directory")
var importPath = flag.String("importpath", "", "import path of package being built (for comments in generated files)") var importPath = flag.String("importpath", "", "import path of package being built (for comments in generated files)")
var exportHeader = flag.String("exportheader", "", "where to write export header if any exported functions") var exportHeader = flag.String("exportheader", "", "where to write export header if any exported functions")
@ -278,6 +279,9 @@ func main() {
// Use the beginning of the md5 of the input to disambiguate. // Use the beginning of the md5 of the input to disambiguate.
h := md5.New() h := md5.New()
for _, input := range goFiles { for _, input := range goFiles {
if *srcDir != "" {
input = filepath.Join(*srcDir, input)
}
f, err := os.Open(input) f, err := os.Open(input)
if err != nil { if err != nil {
fatalf("%s", err) fatalf("%s", err)
@ -289,6 +293,9 @@ func main() {
fs := make([]*File, len(goFiles)) fs := make([]*File, len(goFiles))
for i, input := range goFiles { for i, input := range goFiles {
if *srcDir != "" {
input = filepath.Join(*srcDir, input)
}
f := new(File) f := new(File)
f.ReadGo(input) f.ReadGo(input)
f.DiscardCgoDirectives() f.DiscardCgoDirectives()

View file

@ -19,7 +19,10 @@ import (
"strings" "strings"
) )
var conf = printer.Config{Mode: printer.SourcePos, Tabwidth: 8} var (
conf = printer.Config{Mode: printer.SourcePos, Tabwidth: 8}
noSourceConf = printer.Config{Tabwidth: 8}
)
// writeDefs creates output files to be compiled by gc and gcc. // writeDefs creates output files to be compiled by gc and gcc.
func (p *Package) writeDefs() { func (p *Package) writeDefs() {
@ -95,7 +98,19 @@ func (p *Package) writeDefs() {
for _, name := range typedefNames { for _, name := range typedefNames {
def := typedef[name] def := typedef[name]
fmt.Fprintf(fgo2, "type %s ", name) fmt.Fprintf(fgo2, "type %s ", name)
conf.Fprint(fgo2, fset, def.Go) // We don't have source info for these types, so write them out without source info.
// Otherwise types would look like:
//
// type _Ctype_struct_cb struct {
// //line :1
// on_test *[0]byte
// //line :1
// }
//
// Which is not useful. Moreover we never override source info,
// so subsequent source code uses the same source info.
// Moreover, empty file name makes compile emit no source debug info at all.
noSourceConf.Fprint(fgo2, fset, def.Go)
fmt.Fprintf(fgo2, "\n\n") fmt.Fprintf(fgo2, "\n\n")
} }
if *gccgo { if *gccgo {
@ -111,17 +126,11 @@ func (p *Package) writeDefs() {
fmt.Fprint(fgo2, goProlog) fmt.Fprint(fgo2, goProlog)
} }
for i, t := range p.CgoChecks { if fc != nil {
n := p.unsafeCheckPointerNameIndex(i, false) fmt.Fprintf(fc, "#line 1 \"cgo-generated-wrappers\"\n")
fmt.Fprintf(fgo2, "\nfunc %s(p %s, args ...interface{}) %s {\n", n, t, t)
fmt.Fprintf(fgo2, "\treturn _cgoCheckPointer(p, args...).(%s)\n", t)
fmt.Fprintf(fgo2, "}\n")
} }
for i, t := range p.DeferredCgoChecks { if fm != nil {
n := p.unsafeCheckPointerNameIndex(i, true) fmt.Fprintf(fm, "#line 1 \"cgo-generated-wrappers\"\n")
fmt.Fprintf(fgo2, "\nfunc %s(p interface{}, args ...interface{}) %s {\n", n, t)
fmt.Fprintf(fgo2, "\treturn _cgoCheckPointer(p, args...).(%s)\n", t)
fmt.Fprintf(fgo2, "}\n")
} }
gccgoSymbolPrefix := p.gccgoSymbolPrefix() gccgoSymbolPrefix := p.gccgoSymbolPrefix()
@ -346,11 +355,7 @@ func (p *Package) structType(n *Name) (string, int64) {
fmt.Fprintf(&buf, "\t\tchar __pad%d[%d];\n", off, pad) fmt.Fprintf(&buf, "\t\tchar __pad%d[%d];\n", off, pad)
off += pad off += pad
} }
qual := "" fmt.Fprintf(&buf, "\t\t%s r;\n", t.C)
if c := t.C.String(); c[len(c)-1] == '*' {
qual = "const "
}
fmt.Fprintf(&buf, "\t\t%s%s r;\n", qual, t.C)
off += t.Size off += t.Size
} }
if off%p.PtrSize != 0 { if off%p.PtrSize != 0 {
@ -611,20 +616,10 @@ func (p *Package) writeOutputFunc(fgcc *os.File, n *Name) {
} }
} }
fmt.Fprintf(fgcc, "%s(", n.C) fmt.Fprintf(fgcc, "%s(", n.C)
for i, t := range n.FuncType.Params { for i := range n.FuncType.Params {
if i > 0 { if i > 0 {
fmt.Fprintf(fgcc, ", ") fmt.Fprintf(fgcc, ", ")
} }
// We know the type params are correct, because
// the Go equivalents had good type params.
// However, our version of the type omits the magic
// words const and volatile, which can provoke
// C compiler warnings. Silence them by casting
// all pointers to void*. (Eventually that will produce
// other warnings.)
if c := t.C.String(); c[len(c)-1] == '*' {
fmt.Fprintf(fgcc, "(void*)")
}
fmt.Fprintf(fgcc, "a->p%d", i) fmt.Fprintf(fgcc, "a->p%d", i)
} }
fmt.Fprintf(fgcc, ");\n") fmt.Fprintf(fgcc, ");\n")
@ -684,14 +679,10 @@ func (p *Package) writeGccgoOutputFunc(fgcc *os.File, n *Name) {
} }
} }
fmt.Fprintf(fgcc, "%s(", n.C) fmt.Fprintf(fgcc, "%s(", n.C)
for i, t := range n.FuncType.Params { for i := range n.FuncType.Params {
if i > 0 { if i > 0 {
fmt.Fprintf(fgcc, ", ") fmt.Fprintf(fgcc, ", ")
} }
// Cast to void* to avoid warnings due to omitted qualifiers.
if c := t.C.String(); c[len(c)-1] == '*' {
fmt.Fprintf(fgcc, "(void*)")
}
fmt.Fprintf(fgcc, "p%d", i) fmt.Fprintf(fgcc, "p%d", i)
} }
fmt.Fprintf(fgcc, ");\n") fmt.Fprintf(fgcc, ");\n")
@ -1217,8 +1208,8 @@ var goTypes = map[string]*Type{
"uint64": {Size: 8, Align: 8, C: c("GoUint64")}, "uint64": {Size: 8, Align: 8, C: c("GoUint64")},
"float32": {Size: 4, Align: 4, C: c("GoFloat32")}, "float32": {Size: 4, Align: 4, C: c("GoFloat32")},
"float64": {Size: 8, Align: 8, C: c("GoFloat64")}, "float64": {Size: 8, Align: 8, C: c("GoFloat64")},
"complex64": {Size: 8, Align: 8, C: c("GoComplex64")}, "complex64": {Size: 8, Align: 4, C: c("GoComplex64")},
"complex128": {Size: 16, Align: 16, C: c("GoComplex128")}, "complex128": {Size: 16, Align: 8, C: c("GoComplex128")},
} }
// Map an ast type to a Type. // Map an ast type to a Type.
@ -1299,6 +1290,7 @@ func (p *Package) cgoType(e ast.Expr) *Type {
} }
const gccProlog = ` const gccProlog = `
#line 1 "cgo-gcc-prolog"
/* /*
If x and y are not equal, the type will be invalid If x and y are not equal, the type will be invalid
(have a negative array count) and an inscrutable error will come (have a negative array count) and an inscrutable error will come
@ -1332,6 +1324,7 @@ const noTsanProlog = `
// This must match the TSAN code in runtime/cgo/libcgo.h. // This must match the TSAN code in runtime/cgo/libcgo.h.
const yesTsanProlog = ` const yesTsanProlog = `
#line 1 "cgo-tsan-prolog"
#define CGO_NO_SANITIZE_THREAD __attribute__ ((no_sanitize_thread)) #define CGO_NO_SANITIZE_THREAD __attribute__ ((no_sanitize_thread))
long long _cgo_sync __attribute__ ((common)); long long _cgo_sync __attribute__ ((common));
@ -1354,6 +1347,7 @@ static void _cgo_tsan_release() {
var tsanProlog = noTsanProlog var tsanProlog = noTsanProlog
const builtinProlog = ` const builtinProlog = `
#line 1 "cgo-builtin-prolog"
#include <stddef.h> /* for ptrdiff_t and size_t below */ #include <stddef.h> /* for ptrdiff_t and size_t below */
/* Define intgo when compiling with GCC. */ /* Define intgo when compiling with GCC. */
@ -1377,14 +1371,14 @@ func _cgo_runtime_cgocall(unsafe.Pointer, uintptr) int32
func _cgo_runtime_cgocallback(unsafe.Pointer, unsafe.Pointer, uintptr, uintptr) func _cgo_runtime_cgocallback(unsafe.Pointer, unsafe.Pointer, uintptr, uintptr)
//go:linkname _cgoCheckPointer runtime.cgoCheckPointer //go:linkname _cgoCheckPointer runtime.cgoCheckPointer
func _cgoCheckPointer(interface{}, ...interface{}) interface{} func _cgoCheckPointer(interface{}, ...interface{})
//go:linkname _cgoCheckResult runtime.cgoCheckResult //go:linkname _cgoCheckResult runtime.cgoCheckResult
func _cgoCheckResult(interface{}) func _cgoCheckResult(interface{})
` `
const gccgoGoProlog = ` const gccgoGoProlog = `
func _cgoCheckPointer(interface{}, ...interface{}) interface{} func _cgoCheckPointer(interface{}, ...interface{})
func _cgoCheckResult(interface{}) func _cgoCheckResult(interface{})
` `
@ -1461,9 +1455,15 @@ const cMallocDefGo = `
var __cgofn__cgoPREFIX_Cfunc__Cmalloc byte var __cgofn__cgoPREFIX_Cfunc__Cmalloc byte
var _cgoPREFIX_Cfunc__Cmalloc = unsafe.Pointer(&__cgofn__cgoPREFIX_Cfunc__Cmalloc) var _cgoPREFIX_Cfunc__Cmalloc = unsafe.Pointer(&__cgofn__cgoPREFIX_Cfunc__Cmalloc)
//go:linkname runtime_throw runtime.throw
func runtime_throw(string)
//go:cgo_unsafe_args //go:cgo_unsafe_args
func _cgo_cmalloc(p0 uint64) (r1 unsafe.Pointer) { func _cgo_cmalloc(p0 uint64) (r1 unsafe.Pointer) {
_cgo_runtime_cgocall(_cgoPREFIX_Cfunc__Cmalloc, uintptr(unsafe.Pointer(&p0))) _cgo_runtime_cgocall(_cgoPREFIX_Cfunc__Cmalloc, uintptr(unsafe.Pointer(&p0)))
if r1 == nil {
runtime_throw("runtime: C malloc failed")
}
return return
} }
` `
@ -1500,6 +1500,7 @@ func (p *Package) cPrologGccgo() string {
} }
const cPrologGccgo = ` const cPrologGccgo = `
#line 1 "cgo-c-prolog-gccgo"
#include <stdint.h> #include <stdint.h>
#include <stdlib.h> #include <stdlib.h>
#include <string.h> #include <string.h>
@ -1564,18 +1565,17 @@ typedef struct __go_empty_interface {
void *__object; void *__object;
} Eface; } Eface;
extern Eface runtimeCgoCheckPointer(Eface, Slice) extern void runtimeCgoCheckPointer(Eface, Slice)
__asm__("runtime.cgoCheckPointer") __asm__("runtime.cgoCheckPointer")
__attribute__((weak)); __attribute__((weak));
extern Eface localCgoCheckPointer(Eface, Slice) extern void localCgoCheckPointer(Eface, Slice)
__asm__("GCCGOSYMBOLPREF._cgoCheckPointer"); __asm__("GCCGOSYMBOLPREF._cgoCheckPointer");
Eface localCgoCheckPointer(Eface ptr, Slice args) { void localCgoCheckPointer(Eface ptr, Slice args) {
if(runtimeCgoCheckPointer) { if(runtimeCgoCheckPointer) {
return runtimeCgoCheckPointer(ptr, args); runtimeCgoCheckPointer(ptr, args);
} }
return ptr;
} }
extern void runtimeCgoCheckResult(Eface) extern void runtimeCgoCheckResult(Eface)
@ -1598,6 +1598,7 @@ func (p *Package) gccExportHeaderProlog() string {
const gccExportHeaderProlog = ` const gccExportHeaderProlog = `
/* Start of boilerplate cgo prologue. */ /* Start of boilerplate cgo prologue. */
#line 1 "cgo-gcc-export-header-prolog"
#ifndef GO_CGO_PROLOGUE_H #ifndef GO_CGO_PROLOGUE_H
#define GO_CGO_PROLOGUE_H #define GO_CGO_PROLOGUE_H
@ -1651,6 +1652,7 @@ const gccExportHeaderEpilog = `
// We use weak declarations, and test the addresses, so that this code // We use weak declarations, and test the addresses, so that this code
// works with older versions of gccgo. // works with older versions of gccgo.
const gccgoExportFileProlog = ` const gccgoExportFileProlog = `
#line 1 "cgo-gccgo-export-file-prolog"
extern _Bool runtime_iscgo __attribute__ ((weak)); extern _Bool runtime_iscgo __attribute__ ((weak));
static void GoInit(void) __attribute__ ((constructor)); static void GoInit(void) __attribute__ ((constructor));

View file

@ -17,6 +17,7 @@
// clean remove object files // clean remove object files
// doc show documentation for package or symbol // doc show documentation for package or symbol
// env print Go environment information // env print Go environment information
// bug print information for bug reports
// fix run go tool fix on packages // fix run go tool fix on packages
// fmt run gofmt on package sources // fmt run gofmt on package sources
// generate generate Go files by processing source // generate generate Go files by processing source
@ -323,6 +324,17 @@
// each named variable on its own line. // each named variable on its own line.
// //
// //
// Print information for bug reports
//
// Usage:
//
// go bug
//
// Bug prints information that helps file effective bug reports.
//
// Bugs may be reported at https://golang.org/issue/new.
//
//
// Run go tool fix on packages // Run go tool fix on packages
// //
// Usage: // Usage:
@ -367,7 +379,7 @@
// //
// Generate runs commands described by directives within existing // Generate runs commands described by directives within existing
// files. Those commands can run any process but the intent is to // files. Those commands can run any process but the intent is to
// create or update Go source files, for instance by running yacc. // create or update Go source files.
// //
// Go generate is never run automatically by go build, go get, go test, // Go generate is never run automatically by go build, go get, go test,
// and so on. It must be run explicitly. // and so on. It must be run explicitly.
@ -430,10 +442,10 @@
// can be used to create aliases or to handle multiword generators. // can be used to create aliases or to handle multiword generators.
// For example, // For example,
// //
// //go:generate -command yacc go tool yacc // //go:generate -command foo go tool foo
// //
// specifies that the command "yacc" represents the generator // specifies that the command "foo" represents the generator
// "go tool yacc". // "go tool foo".
// //
// Generate processes packages in the order given on the command line, // Generate processes packages in the order given on the command line,
// one at a time. If the command line lists .go files, they are treated // one at a time. If the command line lists .go files, they are treated
@ -496,11 +508,13 @@
// and their dependencies. By default, get uses the network to check out // and their dependencies. By default, get uses the network to check out
// missing packages but does not use it to look for updates to existing packages. // missing packages but does not use it to look for updates to existing packages.
// //
// The -v flag enables verbose progress and debug output.
//
// Get also accepts build flags to control the installation. See 'go help build'. // Get also accepts build flags to control the installation. See 'go help build'.
// //
// When checking out a new package, get creates the target directory // When checking out a new package, get creates the target directory
// GOPATH/src/<import-path>. If the GOPATH contains multiple entries, // GOPATH/src/<import-path>. If the GOPATH contains multiple entries,
// get uses the first one. See 'go help gopath'. // get uses the first one. For more details see: 'go help gopath'.
// //
// When checking out or updating a package, get looks for a branch or tag // When checking out or updating a package, get looks for a branch or tag
// that matches the locally installed version of Go. The most important // that matches the locally installed version of Go. The most important
@ -584,6 +598,8 @@
// SwigFiles []string // .swig files // SwigFiles []string // .swig files
// SwigCXXFiles []string // .swigcxx files // SwigCXXFiles []string // .swigcxx files
// SysoFiles []string // .syso object files to add to archive // SysoFiles []string // .syso object files to add to archive
// TestGoFiles []string // _test.go files in package
// XTestGoFiles []string // _test.go files outside package
// //
// // Cgo directives // // Cgo directives
// CgoCFLAGS []string // cgo: flags for C compiler // CgoCFLAGS []string // cgo: flags for C compiler
@ -594,20 +610,23 @@
// CgoPkgConfig []string // cgo: pkg-config names // CgoPkgConfig []string // cgo: pkg-config names
// //
// // Dependency information // // Dependency information
// Imports []string // import paths used by this package // Imports []string // import paths used by this package
// Deps []string // all (recursively) imported dependencies // Deps []string // all (recursively) imported dependencies
// TestImports []string // imports from TestGoFiles
// XTestImports []string // imports from XTestGoFiles
// //
// // Error information // // Error information
// Incomplete bool // this package or a dependency has an error // Incomplete bool // this package or a dependency has an error
// Error *PackageError // error loading package // Error *PackageError // error loading package
// DepsErrors []*PackageError // errors loading dependencies // DepsErrors []*PackageError // errors loading dependencies
//
// TestGoFiles []string // _test.go files in package
// TestImports []string // imports from TestGoFiles
// XTestGoFiles []string // _test.go files outside package
// XTestImports []string // imports from XTestGoFiles
// } // }
// //
// Packages stored in vendor directories report an ImportPath that includes the
// path to the vendor directory (for example, "d/vendor/p" instead of "p"),
// so that the ImportPath uniquely identifies a given copy of a package.
// The Imports, Deps, TestImports, and XTestImports lists also contain these
// expanded imports paths. See golang.org/s/go15vendor for more about vendoring.
//
// The error information, if any, is // The error information, if any, is
// //
// type PackageError struct { // type PackageError struct {
@ -852,6 +871,10 @@
// position independent executables (PIE). Packages not named // position independent executables (PIE). Packages not named
// main are ignored. // main are ignored.
// //
// -buildmode=plugin
// Build the listed main packages, plus all packages that they
// import, into a Go plugin. Packages not named main are ignored.
//
// //
// File types // File types
// //
@ -906,8 +929,13 @@
// On Windows, the value is a semicolon-separated string. // On Windows, the value is a semicolon-separated string.
// On Plan 9, the value is a list. // On Plan 9, the value is a list.
// //
// GOPATH must be set to get, build and install packages outside the // If the environment variable is unset, GOPATH defaults
// standard Go tree. // to a subdirectory named "go" in the user's home directory
// ($HOME/go on Unix, %USERPROFILE%\go on Windows),
// unless that directory holds a Go distribution.
// Run "go env GOPATH" to see the current GOPATH.
//
// See https://golang.org/wiki/SettingGOPATH to set a custom GOPATH.
// //
// Each directory listed in GOPATH must have a prescribed structure: // Each directory listed in GOPATH must have a prescribed structure:
// //
@ -935,9 +963,9 @@
// //
// Here's an example directory layout: // Here's an example directory layout:
// //
// GOPATH=/home/user/gocode // GOPATH=/home/user/go
// //
// /home/user/gocode/ // /home/user/go/
// src/ // src/
// foo/ // foo/
// bar/ (go code in package bar) // bar/ (go code in package bar)
@ -963,7 +991,7 @@
// by code in the directory tree rooted at the parent of "internal". // by code in the directory tree rooted at the parent of "internal".
// Here's an extended version of the directory layout above: // Here's an extended version of the directory layout above:
// //
// /home/user/gocode/ // /home/user/go/
// src/ // src/
// crash/ // crash/
// bang/ (go code in package bang) // bang/ (go code in package bang)
@ -1001,7 +1029,7 @@
// but with the "internal" directory renamed to "vendor" // but with the "internal" directory renamed to "vendor"
// and a new foo/vendor/crash/bang directory added: // and a new foo/vendor/crash/bang directory added:
// //
// /home/user/gocode/ // /home/user/go/
// src/ // src/
// crash/ // crash/
// bang/ (go code in package bang) // bang/ (go code in package bang)
@ -1060,7 +1088,7 @@
// The operating system for which to compile code. // The operating system for which to compile code.
// Examples are linux, darwin, windows, netbsd. // Examples are linux, darwin, windows, netbsd.
// GOPATH // GOPATH
// See 'go help gopath'. // For more details see: 'go help gopath'.
// GORACE // GORACE
// Options for the race detector. // Options for the race detector.
// See https://golang.org/doc/articles/race_detector.html. // See https://golang.org/doc/articles/race_detector.html.
@ -1082,10 +1110,15 @@
// CGO_CXXFLAGS // CGO_CXXFLAGS
// Flags that cgo will pass to the compiler when compiling // Flags that cgo will pass to the compiler when compiling
// C++ code. // C++ code.
// CGO_FFLAGS
// Flags that cgo will pass to the compiler when compiling
// Fortran code.
// CGO_LDFLAGS // CGO_LDFLAGS
// Flags that cgo will pass to the compiler when linking. // Flags that cgo will pass to the compiler when linking.
// CXX // CXX
// The command to use to compile C++ code. // The command to use to compile C++ code.
// PKG_CONFIG
// Path to pkg-config tool.
// //
// Architecture-specific environment variables: // Architecture-specific environment variables:
// //
@ -1107,14 +1140,18 @@
// Whether the linker should use external linking mode // Whether the linker should use external linking mode
// when using -linkmode=auto with code that uses cgo. // when using -linkmode=auto with code that uses cgo.
// Set to 0 to disable external linking mode, 1 to enable it. // Set to 0 to disable external linking mode, 1 to enable it.
// GIT_ALLOW_PROTOCOL
// Defined by Git. A colon-separated list of schemes that are allowed to be used
// with git fetch/clone. If set, any scheme not explicitly mentioned will be
// considered insecure by 'go get'.
// //
// //
// Import path syntax // Import path syntax
// //
// An import path (see 'go help packages') denotes a package // An import path (see 'go help packages') denotes a package stored in the local
// stored in the local file system. In general, an import path denotes // file system. In general, an import path denotes either a standard package (such
// either a standard package (such as "unicode/utf8") or a package // as "unicode/utf8") or a package found in one of the work spaces (For more
// found in one of the work spaces (see 'go help gopath'). // details see: 'go help gopath').
// //
// Relative import paths // Relative import paths
// //
@ -1206,6 +1243,11 @@
// each is tried in turn when downloading. For example, a Git // each is tried in turn when downloading. For example, a Git
// download tries https://, then git+ssh://. // download tries https://, then git+ssh://.
// //
// By default, downloads are restricted to known secure protocols
// (e.g. https, ssh). To override this setting for Git downloads, the
// GIT_ALLOW_PROTOCOL environment variable can be set (For more details see:
// 'go help environment').
//
// If the import path is not a known code hosting site and also lacks a // If the import path is not a known code hosting site and also lacks a
// version control qualifier, the go tool attempts to fetch the import // version control qualifier, the go tool attempts to fetch the import
// over https/http and looks for a <meta> tag in the document's HTML // over https/http and looks for a <meta> tag in the document's HTML
@ -1246,8 +1288,8 @@
// same meta tag and then git clone https://code.org/r/p/exproj into // same meta tag and then git clone https://code.org/r/p/exproj into
// GOPATH/src/example.org. // GOPATH/src/example.org.
// //
// New downloaded packages are written to the first directory // New downloaded packages are written to the first directory listed in the GOPATH
// listed in the GOPATH environment variable (see 'go help gopath'). // environment variable (For more details see: 'go help gopath').
// //
// The go command attempts to download the version of the // The go command attempts to download the version of the
// package appropriate for the Go release being used. // package appropriate for the Go release being used.
@ -1291,7 +1333,7 @@
// //
// Otherwise, the import path P denotes the package found in // Otherwise, the import path P denotes the package found in
// the directory DIR/src/P for some DIR listed in the GOPATH // the directory DIR/src/P for some DIR listed in the GOPATH
// environment variable (see 'go help gopath'). // environment variable (For more details see: 'go help gopath').
// //
// If no import paths are given, the action applies to the // If no import paths are given, the action applies to the
// package in the current directory. // package in the current directory.
@ -1311,6 +1353,9 @@
// - "cmd" expands to the Go repository's commands and their // - "cmd" expands to the Go repository's commands and their
// internal libraries. // internal libraries.
// //
// Import paths beginning with "cmd/" only match source code in
// the Go repository.
//
// An import path is a pattern if it includes one or more "..." wildcards, // An import path is a pattern if it includes one or more "..." wildcards,
// each of which can match any string, including the empty string and // each of which can match any string, including the empty string and
// strings containing slashes. Such a pattern expands to all package // strings containing slashes. Such a pattern expands to all package
@ -1366,28 +1411,11 @@
// By default, no benchmarks run. To run all benchmarks, // By default, no benchmarks run. To run all benchmarks,
// use '-bench .' or '-bench=.'. // use '-bench .' or '-bench=.'.
// //
// -benchmem
// Print memory allocation statistics for benchmarks.
//
// -benchtime t // -benchtime t
// Run enough iterations of each benchmark to take t, specified // Run enough iterations of each benchmark to take t, specified
// as a time.Duration (for example, -benchtime 1h30s). // as a time.Duration (for example, -benchtime 1h30s).
// The default is 1 second (1s). // The default is 1 second (1s).
// //
// -blockprofile block.out
// Write a goroutine blocking profile to the specified file
// when all tests are complete.
// Writes test binary as -c would.
//
// -blockprofilerate n
// Control the detail provided in goroutine blocking profiles by
// calling runtime.SetBlockProfileRate with n.
// See 'go doc runtime.SetBlockProfileRate'.
// The profiler aims to sample, on average, one blocking event every
// n nanoseconds the program spends blocked. By default,
// if -test.blockprofile is set without this flag, all blocking events
// are recorded, equivalent to -test.blockprofilerate=1.
//
// -count n // -count n
// Run each test and benchmark n times (default 1). // Run each test and benchmark n times (default 1).
// If -cpu is set, run n times for each GOMAXPROCS value. // If -cpu is set, run n times for each GOMAXPROCS value.
@ -1413,33 +1441,11 @@
// Packages are specified as import paths. // Packages are specified as import paths.
// Sets -cover. // Sets -cover.
// //
// -coverprofile cover.out
// Write a coverage profile to the file after all tests have passed.
// Sets -cover.
//
// -cpu 1,2,4 // -cpu 1,2,4
// Specify a list of GOMAXPROCS values for which the tests or // Specify a list of GOMAXPROCS values for which the tests or
// benchmarks should be executed. The default is the current value // benchmarks should be executed. The default is the current value
// of GOMAXPROCS. // of GOMAXPROCS.
// //
// -cpuprofile cpu.out
// Write a CPU profile to the specified file before exiting.
// Writes test binary as -c would.
//
// -memprofile mem.out
// Write a memory profile to the file after all tests have passed.
// Writes test binary as -c would.
//
// -memprofilerate n
// Enable more precise (and expensive) memory profiles by setting
// runtime.MemProfileRate. See 'go doc runtime.MemProfileRate'.
// To profile all memory allocations, use -test.memprofilerate=1
// and pass --alloc_space flag to the pprof tool.
//
// -outputdir directory
// Place output files from profiling in the specified directory,
// by default the directory in which "go test" is running.
//
// -parallel n // -parallel n
// Allow parallel execution of test functions that call t.Parallel. // Allow parallel execution of test functions that call t.Parallel.
// The value of this flag is the maximum number of tests to run // The value of this flag is the maximum number of tests to run
@ -1465,13 +1471,64 @@
// If a test runs longer than t, panic. // If a test runs longer than t, panic.
// The default is 10 minutes (10m). // The default is 10 minutes (10m).
// //
// -trace trace.out
// Write an execution trace to the specified file before exiting.
//
// -v // -v
// Verbose output: log all tests as they are run. Also print all // Verbose output: log all tests as they are run. Also print all
// text from Log and Logf calls even if the test succeeds. // text from Log and Logf calls even if the test succeeds.
// //
// The following flags are also recognized by 'go test' and can be used to
// profile the tests during execution:
//
// -benchmem
// Print memory allocation statistics for benchmarks.
//
// -blockprofile block.out
// Write a goroutine blocking profile to the specified file
// when all tests are complete.
// Writes test binary as -c would.
//
// -blockprofilerate n
// Control the detail provided in goroutine blocking profiles by
// calling runtime.SetBlockProfileRate with n.
// See 'go doc runtime.SetBlockProfileRate'.
// The profiler aims to sample, on average, one blocking event every
// n nanoseconds the program spends blocked. By default,
// if -test.blockprofile is set without this flag, all blocking events
// are recorded, equivalent to -test.blockprofilerate=1.
//
// -coverprofile cover.out
// Write a coverage profile to the file after all tests have passed.
// Sets -cover.
//
// -cpuprofile cpu.out
// Write a CPU profile to the specified file before exiting.
// Writes test binary as -c would.
//
// -memprofile mem.out
// Write a memory profile to the file after all tests have passed.
// Writes test binary as -c would.
//
// -memprofilerate n
// Enable more precise (and expensive) memory profiles by setting
// runtime.MemProfileRate. See 'go doc runtime.MemProfileRate'.
// To profile all memory allocations, use -test.memprofilerate=1
// and pass --alloc_space flag to the pprof tool.
//
// -mutexprofile mutex.out
// Write a mutex contention profile to the specified file
// when all tests are complete.
// Writes test binary as -c would.
//
// -mutexprofilefraction n
// Sample 1 in n stack traces of goroutines holding a
// contended mutex.
//
// -outputdir directory
// Place output files from profiling in the specified directory,
// by default the directory in which "go test" is running.
//
// -trace trace.out
// Write an execution trace to the specified file before exiting.
//
// Each of these flags is also recognized with an optional 'test.' prefix, // Each of these flags is also recognized with an optional 'test.' prefix,
// as in -test.v. When invoking the generated test binary (the result of // as in -test.v. When invoking the generated test binary (the result of
// 'go test -c') directly, however, the prefix is mandatory. // 'go test -c') directly, however, the prefix is mandatory.
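As a hedged aside (not part of this change): the data behind the new -mutexprofile and -mutexprofilefraction flags can also be produced in-process with runtime.SetMutexProfileFraction and the runtime/pprof "mutex" profile, which is what the test flag records. A minimal sketch:

package main

import (
	"os"
	"runtime"
	"runtime/pprof"
	"sync"
)

func main() {
	// Sample roughly 1 in 5 contention events, as -mutexprofilefraction 5 would.
	runtime.SetMutexProfileFraction(5)

	var mu sync.Mutex
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < 1000; j++ {
				mu.Lock()
				mu.Unlock()
			}
		}()
	}
	wg.Wait()

	// Write the collected contention profile, as 'go test -mutexprofile mutex.out' would.
	f, err := os.Create("mutex.out")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	pprof.Lookup("mutex").WriteTo(f, 0)
}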
@ -1551,7 +1608,8 @@
// is compared exactly against the comment (see examples below). If the last // is compared exactly against the comment (see examples below). If the last
// comment begins with "Unordered output:" then the output is compared to the // comment begins with "Unordered output:" then the output is compared to the
// comment, however the order of the lines is ignored. An example with no such // comment, however the order of the lines is ignored. An example with no such
// comment, or with no text after "Output:" is compiled but not executed. // comment is compiled but not executed. An example with no text after
// "Output:" is compiled, executed, and expected to produce no output.
// //
// Godoc displays the body of ExampleXXX to demonstrate the use // Godoc displays the body of ExampleXXX to demonstrate the use
// of the function, constant, or variable XXX. An example of a method M with // of the function, constant, or variable XXX. An example of a method M with
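To make the output-matching rules above concrete, here is a minimal sketch of an example test file (illustrative, not part of this change), showing both the ordered "Output:" form and the "Unordered output:" form:

package sort_test

import (
	"fmt"
	"sort"
)

// Compared line-by-line against the "Output:" comment.
func ExampleStrings() {
	s := []string{"c", "a", "b"}
	sort.Strings(s)
	fmt.Println(s)
	// Output: [a b c]
}

// Compared against the comment with line order ignored.
func ExampleMapKeys() {
	m := map[string]int{"x": 1, "y": 2}
	for k := range m {
		fmt.Println(k)
	}
	// Unordered output:
	// x
	// y
}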

View file

@ -36,3 +36,6 @@ func httpsOrHTTP(importPath string, security securityMode) (string, io.ReadClose
func parseMetaGoImports(r io.Reader) ([]metaImport, error) { func parseMetaGoImports(r io.Reader) ([]metaImport, error) {
panic("unreachable") panic("unreachable")
} }
func queryEscape(s string) string { panic("unreachable") }
func openBrowser(url string) bool { panic("unreachable") }


libgo/go/cmd/go/bug.go Normal file
View file

@ -0,0 +1,213 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"regexp"
"runtime"
"strings"
)
var cmdBug = &Command{
Run: runBug,
UsageLine: "bug",
Short: "print information for bug reports",
Long: `
Bug prints information that helps file effective bug reports.
Bugs may be reported at https://golang.org/issue/new.
`,
}
func init() {
cmdBug.Flag.BoolVar(&buildV, "v", false, "")
}
func runBug(cmd *Command, args []string) {
var buf bytes.Buffer
buf.WriteString(bugHeader)
inspectGoVersion(&buf)
fmt.Fprint(&buf, "#### System details\n\n")
fmt.Fprintln(&buf, "```")
fmt.Fprintf(&buf, "go version %s %s/%s\n", runtime.Version(), runtime.GOOS, runtime.GOARCH)
env := newEnv
env = append(env, extraEnvVars()...)
for _, e := range env {
// Hide the TERM environment variable from "go bug".
// See issue #18128
if e.name != "TERM" {
fmt.Fprintf(&buf, "%s=\"%s\"\n", e.name, e.value)
}
}
printGoDetails(&buf)
printOSDetails(&buf)
printCDetails(&buf)
fmt.Fprintln(&buf, "```")
body := buf.String()
url := "https://github.com/golang/go/issues/new?body=" + queryEscape(body)
if !openBrowser(url) {
fmt.Print("Please file a new issue at golang.org/issue/new using this template:\n\n")
fmt.Print(body)
}
}
const bugHeader = `Please answer these questions before submitting your issue. Thanks!
#### What did you do?
If possible, provide a recipe for reproducing the error.
A complete runnable program is good.
A link on play.golang.org is best.
#### What did you expect to see?
#### What did you see instead?
`
func printGoDetails(w io.Writer) {
printCmdOut(w, "GOROOT/bin/go version: ", filepath.Join(runtime.GOROOT(), "bin/go"), "version")
printCmdOut(w, "GOROOT/bin/go tool compile -V: ", filepath.Join(runtime.GOROOT(), "bin/go"), "tool", "compile", "-V")
}
func printOSDetails(w io.Writer) {
switch runtime.GOOS {
case "darwin":
printCmdOut(w, "uname -v: ", "uname", "-v")
printCmdOut(w, "", "sw_vers")
case "linux":
printCmdOut(w, "uname -sr: ", "uname", "-sr")
printCmdOut(w, "", "lsb_release", "-a")
printGlibcVersion(w)
case "openbsd", "netbsd", "freebsd", "dragonfly":
printCmdOut(w, "uname -v: ", "uname", "-v")
case "solaris":
out, err := ioutil.ReadFile("/etc/release")
if err == nil {
fmt.Fprintf(w, "/etc/release: %s\n", out)
} else {
if buildV {
fmt.Printf("failed to read /etc/release: %v\n", err)
}
}
}
}
func printCDetails(w io.Writer) {
printCmdOut(w, "lldb --version: ", "lldb", "--version")
cmd := exec.Command("gdb", "--version")
out, err := cmd.Output()
if err == nil {
// There's apparently no combination of command line flags
// to get gdb to spit out its version without the license and warranty.
// Print up to the first newline.
fmt.Fprintf(w, "gdb --version: %s\n", firstLine(out))
} else {
if buildV {
fmt.Printf("failed to run gdb --version: %v\n", err)
}
}
}
func inspectGoVersion(w io.Writer) {
data, err := httpGET("https://golang.org/VERSION?m=text")
if err != nil {
if buildV {
fmt.Printf("failed to read from golang.org/VERSION: %v\n", err)
}
return
}
// golang.org/VERSION currently returns a whitespace-free string,
// but just in case, protect against that changing.
// Similarly so for runtime.Version.
release := string(bytes.TrimSpace(data))
vers := strings.TrimSpace(runtime.Version())
if vers == release {
// Up to date
return
}
// Devel version or outdated release. Either way, this request is apropos.
fmt.Fprintf(w, "#### Does this issue reproduce with the latest release (%s)?\n\n\n", release)
}
// printCmdOut prints the output of running the given command.
// It ignores failures; 'go bug' is best effort.
func printCmdOut(w io.Writer, prefix, path string, args ...string) {
cmd := exec.Command(path, args...)
out, err := cmd.Output()
if err != nil {
if buildV {
fmt.Printf("%s %s: %v\n", path, strings.Join(args, " "), err)
}
return
}
fmt.Fprintf(w, "%s%s\n", prefix, bytes.TrimSpace(out))
}
// firstLine returns the first line of a given byte slice.
func firstLine(buf []byte) []byte {
idx := bytes.IndexByte(buf, '\n')
if idx > 0 {
buf = buf[:idx]
}
return bytes.TrimSpace(buf)
}
// printGlibcVersion prints information about the glibc version.
// It ignores failures.
func printGlibcVersion(w io.Writer) {
tempdir := os.TempDir()
if tempdir == "" {
return
}
src := []byte(`int main() {}`)
srcfile := filepath.Join(tempdir, "go-bug.c")
outfile := filepath.Join(tempdir, "go-bug")
err := ioutil.WriteFile(srcfile, src, 0644)
if err != nil {
return
}
defer os.Remove(srcfile)
cmd := exec.Command("gcc", "-o", outfile, srcfile)
if _, err = cmd.CombinedOutput(); err != nil {
return
}
defer os.Remove(outfile)
cmd = exec.Command("ldd", outfile)
out, err := cmd.CombinedOutput()
if err != nil {
return
}
re := regexp.MustCompile(`libc\.so[^ ]* => ([^ ]+)`)
m := re.FindStringSubmatch(string(out))
if m == nil {
return
}
cmd = exec.Command(m[1])
out, err = cmd.Output()
if err != nil {
return
}
fmt.Fprintf(w, "%s: %s\n", m[1], firstLine(out))
// print another line (the one containing version string) in case of musl libc
if idx := bytes.IndexByte(out, '\n'); bytes.Index(out, []byte("musl")) != -1 && idx > -1 {
fmt.Fprintf(w, "%s\n", firstLine(out[idx+1:]))
}
}
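A standalone sketch of the ldd-parsing step used by printGlibcVersion above (the sample ldd line is invented): the regular expression captures the path of the loaded libc, which the function then runs to obtain its version banner.

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// A typical (hypothetical) line from `ldd ./go-bug`.
	out := "\tlibc.so.6 => /lib/x86_64-linux-gnu/libc.so.6 (0x00007f2a3c000000)"
	re := regexp.MustCompile(`libc\.so[^ ]* => ([^ ]+)`)
	if m := re.FindStringSubmatch(out); m != nil {
		fmt.Println(m[1]) // /lib/x86_64-linux-gnu/libc.so.6
	}
}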

View file

@ -346,6 +346,13 @@ func buildModeInit() {
case "darwin/arm", "darwin/arm64": case "darwin/arm", "darwin/arm64":
codegenArg = "-shared" codegenArg = "-shared"
default: default:
switch goos {
case "dragonfly", "freebsd", "linux", "netbsd", "openbsd", "solaris":
// Use -shared so that the result is
// suitable for inclusion in a PIE or
// shared library.
codegenArg = "-shared"
}
} }
exeSuffix = ".a" exeSuffix = ".a"
ldBuildmode = "c-archive" ldBuildmode = "c-archive"
@ -407,6 +414,21 @@ func buildModeInit() {
fatalf("-buildmode=shared and -o not supported together") fatalf("-buildmode=shared and -o not supported together")
} }
ldBuildmode = "shared" ldBuildmode = "shared"
case "plugin":
pkgsFilter = pkgsMain
if gccgo {
codegenArg = "-fPIC"
} else {
switch platform {
case "linux/amd64", "linux/arm", "linux/arm64", "linux/386",
"android/amd64", "android/arm", "android/arm64", "android/386":
default:
fatalf("-buildmode=plugin not supported on %s\n", platform)
}
codegenArg = "-dynlink"
}
exeSuffix = ".so"
ldBuildmode = "plugin"
default: default:
fatalf("buildmode=%s not supported", buildBuildmode) fatalf("buildmode=%s not supported", buildBuildmode)
} }
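For context on the new 'plugin' build mode handled above, a hedged sketch using the Go 1.8 plugin package (file and symbol names are made up): the plugin is a main package built with 'go build -buildmode=plugin -o greeter.so', exporting a function Greet, and the host program loads it at run time:

package main

import "plugin"

func main() {
	p, err := plugin.Open("greeter.so")
	if err != nil {
		panic(err)
	}
	sym, err := p.Lookup("Greet")
	if err != nil {
		panic(err)
	}
	greet := sym.(func()) // panics if Greet has a different signature
	greet()
}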
@ -432,10 +454,13 @@ func buildModeInit() {
buildAsmflags = append(buildAsmflags, codegenArg) buildAsmflags = append(buildAsmflags, codegenArg)
buildGcflags = append(buildGcflags, codegenArg) buildGcflags = append(buildGcflags, codegenArg)
} }
if buildContext.InstallSuffix != "" { // Don't alter InstallSuffix when modifying default codegen args.
buildContext.InstallSuffix += "_" if buildBuildmode != "default" || buildLinkshared {
if buildContext.InstallSuffix != "" {
buildContext.InstallSuffix += "_"
}
buildContext.InstallSuffix += codegenArg[1:]
} }
buildContext.InstallSuffix += codegenArg[1:]
} }
} }
@ -452,6 +477,11 @@ func runBuild(cmd *Command, args []string) {
*buildO += exeSuffix *buildO += exeSuffix
} }
// Special case -o /dev/null by not writing at all.
if *buildO == os.DevNull {
*buildO = ""
}
// sanity check some often mis-used options // sanity check some often mis-used options
switch buildContext.Compiler { switch buildContext.Compiler {
case "gccgo": case "gccgo":
@ -580,6 +610,10 @@ func libname(args []string, pkgs []*Package) (string, error) {
} }
func runInstall(cmd *Command, args []string) { func runInstall(cmd *Command, args []string) {
installPackages(args, false)
}
func installPackages(args []string, forGet bool) {
if gobin != "" && !filepath.IsAbs(gobin) { if gobin != "" && !filepath.IsAbs(gobin) {
fatalf("cannot install, GOBIN must be an absolute path") fatalf("cannot install, GOBIN must be an absolute path")
} }
@ -599,7 +633,7 @@ func runInstall(cmd *Command, args []string) {
errorf("go install: no install location for %s: hidden by %s", p.Dir, p.ConflictDir) errorf("go install: no install location for %s: hidden by %s", p.Dir, p.ConflictDir)
default: default:
errorf("go install: no install location for directory %s outside GOPATH\n"+ errorf("go install: no install location for directory %s outside GOPATH\n"+
"\tFor more details see: go help gopath", p.Dir) "\tFor more details see: 'go help gopath'", p.Dir)
} }
} }
} }
@ -607,6 +641,8 @@ func runInstall(cmd *Command, args []string) {
var b builder var b builder
b.init() b.init()
// Set the behavior for `go get` to not error on packages with test files only.
b.testFilesOnlyOK = forGet
var a *action var a *action
if buildBuildmode == "shared" { if buildBuildmode == "shared" {
if libName, err := libname(args, pkgs); err != nil { if libName, err := libname(args, pkgs); err != nil {
@ -697,6 +733,8 @@ type builder struct {
flagCache map[string]bool // a cache of supported compiler flags flagCache map[string]bool // a cache of supported compiler flags
print func(args ...interface{}) (int, error) print func(args ...interface{}) (int, error)
testFilesOnlyOK bool // do not error if the packages only have test files
output sync.Mutex output sync.Mutex
scriptDir string // current directory in printed script scriptDir string // current directory in printed script
@ -1283,6 +1321,8 @@ func (b *builder) do(root *action) {
if err != nil { if err != nil {
if err == errPrintedOutput { if err == errPrintedOutput {
setExitStatus(2) setExitStatus(2)
} else if _, ok := err.(*build.NoGoError); ok && len(a.p.TestGoFiles) > 0 && b.testFilesOnlyOK {
// Ignore the "no buildable Go source files" error for a package with only test files.
} else { } else {
errorf("%s", err) errorf("%s", err)
} }
@ -1369,7 +1409,7 @@ func (b *builder) build(a *action) (err error) {
} }
defer func() { defer func() {
if err != nil && err != errPrintedOutput { if _, ok := err.(*build.NoGoError); err != nil && err != errPrintedOutput && !(ok && b.testFilesOnlyOK && len(a.p.TestGoFiles) > 0) {
err = fmt.Errorf("go build %s: %v", a.p.ImportPath, err) err = fmt.Errorf("go build %s: %v", a.p.ImportPath, err)
} }
}() }()
@ -1400,7 +1440,7 @@ func (b *builder) build(a *action) (err error) {
} }
} }
var gofiles, cgofiles, cfiles, sfiles, cxxfiles, objects, cgoObjects, pcCFLAGS, pcLDFLAGS []string var gofiles, cgofiles, objdirCgofiles, cfiles, sfiles, cxxfiles, objects, cgoObjects, pcCFLAGS, pcLDFLAGS []string
gofiles = append(gofiles, a.p.GoFiles...) gofiles = append(gofiles, a.p.GoFiles...)
cgofiles = append(cgofiles, a.p.CgoFiles...) cgofiles = append(cgofiles, a.p.CgoFiles...)
@ -1422,7 +1462,7 @@ func (b *builder) build(a *action) (err error) {
if err != nil { if err != nil {
return err return err
} }
cgofiles = append(cgofiles, outGo...) objdirCgofiles = append(objdirCgofiles, outGo...)
cfiles = append(cfiles, outC...) cfiles = append(cfiles, outC...)
cxxfiles = append(cxxfiles, outCXX...) cxxfiles = append(cxxfiles, outCXX...)
} }
@ -1457,7 +1497,7 @@ func (b *builder) build(a *action) (err error) {
if a.cgo != nil && a.cgo.target != "" { if a.cgo != nil && a.cgo.target != "" {
cgoExe = a.cgo.target cgoExe = a.cgo.target
} }
outGo, outObj, err := b.cgo(a.p, cgoExe, obj, pcCFLAGS, pcLDFLAGS, cgofiles, gccfiles, cxxfiles, a.p.MFiles, a.p.FFiles) outGo, outObj, err := b.cgo(a, cgoExe, obj, pcCFLAGS, pcLDFLAGS, cgofiles, objdirCgofiles, gccfiles, cxxfiles, a.p.MFiles, a.p.FFiles)
if err != nil { if err != nil {
return err return err
} }
@ -1555,12 +1595,12 @@ func (b *builder) build(a *action) (err error) {
} }
// Assemble .s files. // Assemble .s files.
for _, file := range sfiles { if len(sfiles) > 0 {
out := file[:len(file)-len(".s")] + ".o" ofiles, err := buildToolchain.asm(b, a.p, obj, sfiles)
if err := buildToolchain.asm(b, a.p, obj, obj+out, file); err != nil { if err != nil {
return err return err
} }
objects = append(objects, out) objects = append(objects, ofiles...)
} }
// NOTE(rsc): On Windows, it is critically important that the // NOTE(rsc): On Windows, it is critically important that the
@ -1599,23 +1639,62 @@ func (b *builder) build(a *action) (err error) {
return nil return nil
} }
// pkgconfigCmd returns a pkg-config binary name
// defaultPkgConfig is defined in zdefaultcc.go, written by cmd/dist.
func (b *builder) pkgconfigCmd() string {
return envList("PKG_CONFIG", defaultPkgConfig)[0]
}
// splitPkgConfigOutput parses the pkg-config output into a slice of
// flags. pkg-config always uses \ to escape special characters.
func splitPkgConfigOutput(out []byte) []string {
if len(out) == 0 {
return nil
}
var flags []string
flag := make([]byte, len(out))
r, w := 0, 0
for r < len(out) {
switch out[r] {
case ' ', '\t', '\r', '\n':
if w > 0 {
flags = append(flags, string(flag[:w]))
}
w = 0
case '\\':
r++
fallthrough
default:
if r < len(out) {
flag[w] = out[r]
w++
}
}
r++
}
if w > 0 {
flags = append(flags, string(flag[:w]))
}
return flags
}
// Calls pkg-config if needed and returns the cflags/ldflags needed to build the package. // Calls pkg-config if needed and returns the cflags/ldflags needed to build the package.
func (b *builder) getPkgConfigFlags(p *Package) (cflags, ldflags []string, err error) { func (b *builder) getPkgConfigFlags(p *Package) (cflags, ldflags []string, err error) {
if pkgs := p.CgoPkgConfig; len(pkgs) > 0 { if pkgs := p.CgoPkgConfig; len(pkgs) > 0 {
var out []byte var out []byte
out, err = b.runOut(p.Dir, p.ImportPath, nil, "pkg-config", "--cflags", pkgs) out, err = b.runOut(p.Dir, p.ImportPath, nil, b.pkgconfigCmd(), "--cflags", pkgs)
if err != nil { if err != nil {
b.showOutput(p.Dir, "pkg-config --cflags "+strings.Join(pkgs, " "), string(out)) b.showOutput(p.Dir, b.pkgconfigCmd()+" --cflags "+strings.Join(pkgs, " "), string(out))
b.print(err.Error() + "\n") b.print(err.Error() + "\n")
err = errPrintedOutput err = errPrintedOutput
return return
} }
if len(out) > 0 { if len(out) > 0 {
cflags = strings.Fields(string(out)) cflags = splitPkgConfigOutput(out)
} }
out, err = b.runOut(p.Dir, p.ImportPath, nil, "pkg-config", "--libs", pkgs) out, err = b.runOut(p.Dir, p.ImportPath, nil, b.pkgconfigCmd(), "--libs", pkgs)
if err != nil { if err != nil {
b.showOutput(p.Dir, "pkg-config --libs "+strings.Join(pkgs, " "), string(out)) b.showOutput(p.Dir, b.pkgconfigCmd()+" --libs "+strings.Join(pkgs, " "), string(out))
b.print(err.Error() + "\n") b.print(err.Error() + "\n")
err = errPrintedOutput err = errPrintedOutput
return return
@ -1656,7 +1735,7 @@ func (b *builder) install(a *action) (err error) {
perm := os.FileMode(0666) perm := os.FileMode(0666)
if a1.link { if a1.link {
switch buildBuildmode { switch buildBuildmode {
case "c-archive", "c-shared": case "c-archive", "c-shared", "plugin":
default: default:
perm = 0777 perm = 0777
} }
@ -2197,9 +2276,9 @@ type toolchain interface {
// cc runs the toolchain's C compiler in a directory on a C file // cc runs the toolchain's C compiler in a directory on a C file
// to produce an output file. // to produce an output file.
cc(b *builder, p *Package, objdir, ofile, cfile string) error cc(b *builder, p *Package, objdir, ofile, cfile string) error
// asm runs the assembler in a specific directory on a specific file // asm runs the assembler in a specific directory on specific files
// to generate the named output file. // and returns a list of named output files.
asm(b *builder, p *Package, obj, ofile, sfile string) error asm(b *builder, p *Package, obj string, sfiles []string) ([]string, error)
// pkgpath builds an appropriate path for a temporary package file. // pkgpath builds an appropriate path for a temporary package file.
pkgpath(basedir string, p *Package) string pkgpath(basedir string, p *Package) string
// pack runs the archive packer in a specific directory to create // pack runs the archive packer in a specific directory to create
@ -2236,8 +2315,8 @@ func (noToolchain) gc(b *builder, p *Package, archive, obj string, asmhdr bool,
return "", nil, noCompiler() return "", nil, noCompiler()
} }
func (noToolchain) asm(b *builder, p *Package, obj, ofile, sfile string) error { func (noToolchain) asm(b *builder, p *Package, obj string, sfiles []string) ([]string, error) {
return noCompiler() return nil, noCompiler()
} }
func (noToolchain) pkgpath(basedir string, p *Package) string { func (noToolchain) pkgpath(basedir string, p *Package) string {
@ -2342,11 +2421,10 @@ func (gcToolchain) gc(b *builder, p *Package, archive, obj string, asmhdr bool,
return ofile, output, err return ofile, output, err
} }
func (gcToolchain) asm(b *builder, p *Package, obj, ofile, sfile string) error { func (gcToolchain) asm(b *builder, p *Package, obj string, sfiles []string) ([]string, error) {
// Add -I pkg/GOOS_GOARCH so #include "textflag.h" works in .s files. // Add -I pkg/GOOS_GOARCH so #include "textflag.h" works in .s files.
inc := filepath.Join(goroot, "pkg", "include") inc := filepath.Join(goroot, "pkg", "include")
sfile = mkAbs(p.Dir, sfile) args := []interface{}{buildToolExec, tool("asm"), "-trimpath", b.work, "-I", obj, "-I", inc, "-D", "GOOS_" + goos, "-D", "GOARCH_" + goarch, buildAsmflags}
args := []interface{}{buildToolExec, tool("asm"), "-o", ofile, "-trimpath", b.work, "-I", obj, "-I", inc, "-D", "GOOS_" + goos, "-D", "GOARCH_" + goarch, buildAsmflags}
if p.ImportPath == "runtime" && goarch == "386" { if p.ImportPath == "runtime" && goarch == "386" {
for _, arg := range buildAsmflags { for _, arg := range buildAsmflags {
if arg == "-dynlink" { if arg == "-dynlink" {
@ -2354,11 +2432,16 @@ func (gcToolchain) asm(b *builder, p *Package, obj, ofile, sfile string) error {
} }
} }
} }
args = append(args, sfile) var ofiles []string
if err := b.run(p.Dir, p.ImportPath, nil, args...); err != nil { for _, sfile := range sfiles {
return err ofile := obj + sfile[:len(sfile)-len(".s")] + ".o"
ofiles = append(ofiles, ofile)
a := append(args, "-o", ofile, mkAbs(p.Dir, sfile))
if err := b.run(p.Dir, p.ImportPath, nil, a...); err != nil {
return nil, err
}
} }
return nil return ofiles, nil
} }
// toolVerify checks that the command line args writes the same output file // toolVerify checks that the command line args writes the same output file
@ -2516,6 +2599,13 @@ func (gcToolchain) ld(b *builder, root *action, out string, allactions []*action
if root.p.omitDWARF { if root.p.omitDWARF {
ldflags = append(ldflags, "-w") ldflags = append(ldflags, "-w")
} }
if buildBuildmode == "plugin" {
pluginpath := root.p.ImportPath
if pluginpath == "command-line-arguments" {
pluginpath = "plugin/unnamed-" + root.p.buildID
}
ldflags = append(ldflags, "-pluginpath", pluginpath)
}
// If the user has not specified the -extld option, then specify the // If the user has not specified the -extld option, then specify the
// appropriate linker. In case of C++ code, use the compiler named // appropriate linker. In case of C++ code, use the compiler named
@ -2625,15 +2715,24 @@ func (tools gccgoToolchain) gc(b *builder, p *Package, archive, obj string, asmh
return ofile, output, err return ofile, output, err
} }
func (tools gccgoToolchain) asm(b *builder, p *Package, obj, ofile, sfile string) error { func (tools gccgoToolchain) asm(b *builder, p *Package, obj string, sfiles []string) ([]string, error) {
sfile = mkAbs(p.Dir, sfile) var ofiles []string
defs := []string{"-D", "GOOS_" + goos, "-D", "GOARCH_" + goarch} for _, sfile := range sfiles {
if pkgpath := gccgoCleanPkgpath(p); pkgpath != "" { ofile := obj + sfile[:len(sfile)-len(".s")] + ".o"
defs = append(defs, `-D`, `GOPKGPATH=`+pkgpath) ofiles = append(ofiles, ofile)
sfile = mkAbs(p.Dir, sfile)
defs := []string{"-D", "GOOS_" + goos, "-D", "GOARCH_" + goarch}
if pkgpath := gccgoCleanPkgpath(p); pkgpath != "" {
defs = append(defs, `-D`, `GOPKGPATH=`+pkgpath)
}
defs = tools.maybePIC(defs)
defs = append(defs, b.gccArchArgs()...)
err := b.run(p.Dir, p.ImportPath, nil, tools.compiler(), "-xassembler-with-cpp", "-I", obj, "-c", "-o", ofile, defs, sfile)
if err != nil {
return nil, err
}
} }
defs = tools.maybePIC(defs) return ofiles, nil
defs = append(defs, b.gccArchArgs()...)
return b.run(p.Dir, p.ImportPath, nil, tools.compiler(), "-xassembler-with-cpp", "-I", obj, "-c", "-o", ofile, defs, sfile)
} }
func (gccgoToolchain) pkgpath(basedir string, p *Package) string { func (gccgoToolchain) pkgpath(basedir string, p *Package) string {
@ -2755,7 +2854,7 @@ func (tools gccgoToolchain) link(b *builder, root *action, out string, allaction
if !apackagePathsSeen[a.p.ImportPath] { if !apackagePathsSeen[a.p.ImportPath] {
apackagePathsSeen[a.p.ImportPath] = true apackagePathsSeen[a.p.ImportPath] = true
target := a.target target := a.target
if len(a.p.CgoFiles) > 0 { if len(a.p.CgoFiles) > 0 || a.p.usesSwig() {
target, err = readAndRemoveCgoFlags(target) target, err = readAndRemoveCgoFlags(target)
if err != nil { if err != nil {
return return
@ -2947,7 +3046,7 @@ func (tools gccgoToolchain) cc(b *builder, p *Package, objdir, ofile, cfile stri
// maybePIC adds -fPIC to the list of arguments if needed. // maybePIC adds -fPIC to the list of arguments if needed.
func (tools gccgoToolchain) maybePIC(args []string) []string { func (tools gccgoToolchain) maybePIC(args []string) []string {
switch buildBuildmode { switch buildBuildmode {
case "c-shared", "shared": case "c-shared", "shared", "plugin":
args = append(args, "-fPIC") args = append(args, "-fPIC")
} }
return args return args
@ -2988,9 +3087,19 @@ func (b *builder) gfortran(p *Package, out string, flags []string, ffile string)
} }
// ccompile runs the given C or C++ compiler and creates an object from a single source file. // ccompile runs the given C or C++ compiler and creates an object from a single source file.
func (b *builder) ccompile(p *Package, out string, flags []string, file string, compiler []string) error { func (b *builder) ccompile(p *Package, outfile string, flags []string, file string, compiler []string) error {
file = mkAbs(p.Dir, file) file = mkAbs(p.Dir, file)
return b.run(p.Dir, p.ImportPath, nil, compiler, flags, "-o", out, "-c", file) desc := p.ImportPath
output, err := b.runOut(p.Dir, desc, nil, compiler, flags, "-o", outfile, "-c", file)
if len(output) > 0 {
b.showOutput(p.Dir, desc, b.processOutput(output))
if err != nil {
err = errPrintedOutput
} else if os.Getenv("GO_BUILDER_NAME") != "" {
return errors.New("C compiler warning promoted to error on Go builders")
}
}
return err
} }
// gccld runs the gcc linker to create an executable from a set of object files. // gccld runs the gcc linker to create an executable from a set of object files.
@ -3129,6 +3238,8 @@ func (b *builder) gccArchArgs() []string {
return []string{"-m64", "-march=z196"} return []string{"-m64", "-march=z196"}
case "mips64", "mips64le": case "mips64", "mips64le":
return []string{"-mabi=64"} return []string{"-mabi=64"}
case "mips", "mipsle":
return []string{"-mabi=32", "-march=mips32"}
} }
return nil return nil
} }
@ -3144,11 +3255,8 @@ func envList(key, def string) []string {
} }
// Return the flags to use when invoking the C, C++ or Fortran compilers, or cgo. // Return the flags to use when invoking the C, C++ or Fortran compilers, or cgo.
func (b *builder) cflags(p *Package, def bool) (cppflags, cflags, cxxflags, fflags, ldflags []string) { func (b *builder) cflags(p *Package) (cppflags, cflags, cxxflags, fflags, ldflags []string) {
var defaults string defaults := "-g -O2"
if def {
defaults = "-g -O2"
}
cppflags = stringList(envList("CGO_CPPFLAGS", ""), p.CgoCPPFLAGS) cppflags = stringList(envList("CGO_CPPFLAGS", ""), p.CgoCPPFLAGS)
cflags = stringList(envList("CGO_CFLAGS", defaults), p.CgoCFLAGS) cflags = stringList(envList("CGO_CFLAGS", defaults), p.CgoCFLAGS)
@ -3160,9 +3268,9 @@ func (b *builder) cflags(p *Package, def bool) (cppflags, cflags, cxxflags, ffla
var cgoRe = regexp.MustCompile(`[/\\:]`) var cgoRe = regexp.MustCompile(`[/\\:]`)
func (b *builder) cgo(p *Package, cgoExe, obj string, pcCFLAGS, pcLDFLAGS, cgofiles, gccfiles, gxxfiles, mfiles, ffiles []string) (outGo, outObj []string, err error) { func (b *builder) cgo(a *action, cgoExe, obj string, pcCFLAGS, pcLDFLAGS, cgofiles, objdirCgofiles, gccfiles, gxxfiles, mfiles, ffiles []string) (outGo, outObj []string, err error) {
cgoCPPFLAGS, cgoCFLAGS, cgoCXXFLAGS, cgoFFLAGS, cgoLDFLAGS := b.cflags(p, true) p := a.p
_, cgoexeCFLAGS, _, _, _ := b.cflags(p, false) cgoCPPFLAGS, cgoCFLAGS, cgoCXXFLAGS, cgoFFLAGS, cgoLDFLAGS := b.cflags(p)
cgoCPPFLAGS = append(cgoCPPFLAGS, pcCFLAGS...) cgoCPPFLAGS = append(cgoCPPFLAGS, pcCFLAGS...)
cgoLDFLAGS = append(cgoLDFLAGS, pcLDFLAGS...) cgoLDFLAGS = append(cgoLDFLAGS, pcLDFLAGS...)
// If we are compiling Objective-C code, then we need to link against libobjc // If we are compiling Objective-C code, then we need to link against libobjc
@ -3183,7 +3291,7 @@ func (b *builder) cgo(p *Package, cgoExe, obj string, pcCFLAGS, pcLDFLAGS, cgofi
} }
} }
if buildMSan && p.ImportPath != "runtime/cgo" { if buildMSan {
cgoCFLAGS = append([]string{"-fsanitize=memory"}, cgoCFLAGS...) cgoCFLAGS = append([]string{"-fsanitize=memory"}, cgoCFLAGS...)
cgoLDFLAGS = append([]string{"-fsanitize=memory"}, cgoLDFLAGS...) cgoLDFLAGS = append([]string{"-fsanitize=memory"}, cgoLDFLAGS...)
} }
@ -3191,20 +3299,33 @@ func (b *builder) cgo(p *Package, cgoExe, obj string, pcCFLAGS, pcLDFLAGS, cgofi
// Allows including _cgo_export.h from .[ch] files in the package. // Allows including _cgo_export.h from .[ch] files in the package.
cgoCPPFLAGS = append(cgoCPPFLAGS, "-I", obj) cgoCPPFLAGS = append(cgoCPPFLAGS, "-I", obj)
// If we have cgo files in the object directory, then copy any
// other cgo files into the object directory, and pass a
// -srcdir option to cgo.
var srcdirarg []string
if len(objdirCgofiles) > 0 {
for _, fn := range cgofiles {
if err := b.copyFile(a, obj+filepath.Base(fn), filepath.Join(p.Dir, fn), 0666, false); err != nil {
return nil, nil, err
}
}
cgofiles = append(cgofiles, objdirCgofiles...)
srcdirarg = []string{"-srcdir", obj}
}
// cgo // cgo
// TODO: CGO_FLAGS? // TODO: CGO_FLAGS?
gofiles := []string{obj + "_cgo_gotypes.go"} gofiles := []string{obj + "_cgo_gotypes.go"}
cfiles := []string{"_cgo_main.c", "_cgo_export.c"} cfiles := []string{"_cgo_export.c"}
for _, fn := range cgofiles { for _, fn := range cgofiles {
f := cgoRe.ReplaceAllString(fn[:len(fn)-2], "_") f := cgoRe.ReplaceAllString(fn[:len(fn)-2], "_")
gofiles = append(gofiles, obj+f+"cgo1.go") gofiles = append(gofiles, obj+f+"cgo1.go")
cfiles = append(cfiles, f+"cgo2.c") cfiles = append(cfiles, f+"cgo2.c")
} }
defunC := obj + "_cgo_defun.c"
cgoflags := []string{}
// TODO: make cgo not depend on $GOARCH? // TODO: make cgo not depend on $GOARCH?
cgoflags := []string{}
if p.Standard && p.ImportPath == "runtime/cgo" { if p.Standard && p.ImportPath == "runtime/cgo" {
cgoflags = append(cgoflags, "-import_runtime_cgo=false") cgoflags = append(cgoflags, "-import_runtime_cgo=false")
} }
@ -3241,28 +3362,125 @@ func (b *builder) cgo(p *Package, cgoExe, obj string, pcCFLAGS, pcLDFLAGS, cgofi
cgoflags = append(cgoflags, "-exportheader="+obj+"_cgo_install.h") cgoflags = append(cgoflags, "-exportheader="+obj+"_cgo_install.h")
} }
if err := b.run(p.Dir, p.ImportPath, cgoenv, buildToolExec, cgoExe, "-objdir", obj, "-importpath", p.ImportPath, cgoflags, "--", cgoCPPFLAGS, cgoexeCFLAGS, cgofiles); err != nil { if err := b.run(p.Dir, p.ImportPath, cgoenv, buildToolExec, cgoExe, srcdirarg, "-objdir", obj, "-importpath", p.ImportPath, cgoflags, "--", cgoCPPFLAGS, cgoCFLAGS, cgofiles); err != nil {
return nil, nil, err return nil, nil, err
} }
outGo = append(outGo, gofiles...) outGo = append(outGo, gofiles...)
// cc _cgo_defun.c // gcc
_, gccgo := buildToolchain.(gccgoToolchain) cflags := stringList(cgoCPPFLAGS, cgoCFLAGS)
if gccgo { for _, cfile := range cfiles {
ofile := obj + cfile[:len(cfile)-1] + "o"
if err := b.gcc(p, ofile, cflags, obj+cfile); err != nil {
return nil, nil, err
}
outObj = append(outObj, ofile)
}
for _, file := range gccfiles {
base := filepath.Base(file)
ofile := obj + cgoRe.ReplaceAllString(base[:len(base)-1], "_") + "o"
if err := b.gcc(p, ofile, cflags, file); err != nil {
return nil, nil, err
}
outObj = append(outObj, ofile)
}
cxxflags := stringList(cgoCPPFLAGS, cgoCXXFLAGS)
for _, file := range gxxfiles {
// Append .o to the file, just in case the pkg has file.c and file.cpp
ofile := obj + cgoRe.ReplaceAllString(filepath.Base(file), "_") + ".o"
if err := b.gxx(p, ofile, cxxflags, file); err != nil {
return nil, nil, err
}
outObj = append(outObj, ofile)
}
for _, file := range mfiles {
// Append .o to the file, just in case the pkg has file.c and file.m
ofile := obj + cgoRe.ReplaceAllString(filepath.Base(file), "_") + ".o"
if err := b.gcc(p, ofile, cflags, file); err != nil {
return nil, nil, err
}
outObj = append(outObj, ofile)
}
fflags := stringList(cgoCPPFLAGS, cgoFFLAGS)
for _, file := range ffiles {
// Append .o to the file, just in case the pkg has file.c and file.f
ofile := obj + cgoRe.ReplaceAllString(filepath.Base(file), "_") + ".o"
if err := b.gfortran(p, ofile, fflags, file); err != nil {
return nil, nil, err
}
outObj = append(outObj, ofile)
}
switch buildToolchain.(type) {
case gcToolchain:
importGo := obj + "_cgo_import.go"
if err := b.dynimport(p, obj, importGo, cgoExe, cflags, cgoLDFLAGS, outObj); err != nil {
return nil, nil, err
}
outGo = append(outGo, importGo)
ofile := obj + "_all.o"
if err := b.collect(p, obj, ofile, cgoLDFLAGS, outObj); err != nil {
return nil, nil, err
}
outObj = []string{ofile}
case gccgoToolchain:
defunC := obj + "_cgo_defun.c"
defunObj := obj + "_cgo_defun.o" defunObj := obj + "_cgo_defun.o"
if err := buildToolchain.cc(b, p, obj, defunObj, defunC); err != nil { if err := buildToolchain.cc(b, p, obj, defunObj, defunC); err != nil {
return nil, nil, err return nil, nil, err
} }
outObj = append(outObj, defunObj) outObj = append(outObj, defunObj)
default:
noCompiler()
} }
// gcc return outGo, outObj, nil
var linkobj []string }
var bareLDFLAGS []string // dynimport creates a Go source file named importGo containing
// //go:cgo_import_dynamic directives for each symbol or library
// dynamically imported by the object files outObj.
func (b *builder) dynimport(p *Package, obj, importGo, cgoExe string, cflags, cgoLDFLAGS, outObj []string) error {
cfile := obj + "_cgo_main.c"
ofile := obj + "_cgo_main.o"
if err := b.gcc(p, ofile, cflags, cfile); err != nil {
return err
}
linkobj := stringList(ofile, outObj, p.SysoFiles)
dynobj := obj + "_cgo_.o"
// we need to use -pie for Linux/ARM to get accurate imported sym
ldflags := cgoLDFLAGS
if (goarch == "arm" && goos == "linux") || goos == "android" {
ldflags = append(ldflags, "-pie")
}
if err := b.gccld(p, dynobj, ldflags, linkobj); err != nil {
return err
}
// cgo -dynimport
var cgoflags []string
if p.Standard && p.ImportPath == "runtime/cgo" {
cgoflags = []string{"-dynlinker"} // record path to dynamic linker
}
return b.run(p.Dir, p.ImportPath, nil, buildToolExec, cgoExe, "-dynpackage", p.Name, "-dynimport", dynobj, "-dynout", importGo, cgoflags)
}
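For reference, an illustrative sketch (not taken from this change) of the shape of the _cgo_import.go file that dynimport writes; the symbols, library, and dynamic-linker path below are examples only:

// _cgo_import.go (generated by cgo -dynimport; hand-written files may not use these directives)
package p

//go:cgo_import_dynamic puts puts "libc.so.6"
//go:cgo_import_dynamic _ _ "libc.so.6"

// Emitted only when -dynlinker is passed, i.e. for runtime/cgo.
//go:cgo_dynamic_linker "/lib64/ld-linux-x86-64.so.2"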
// collect partially links the object files outObj into a single
// relocatable object file named ofile.
func (b *builder) collect(p *Package, obj, ofile string, cgoLDFLAGS, outObj []string) error {
// When linking relocatable objects, various flags need to be // When linking relocatable objects, various flags need to be
// filtered out as they are inapplicable and can cause some linkers // filtered out as they are inapplicable and can cause some linkers
// to fail. // to fail.
var ldflags []string
for i := 0; i < len(cgoLDFLAGS); i++ { for i := 0; i < len(cgoLDFLAGS); i++ {
f := cgoLDFLAGS[i] f := cgoLDFLAGS[i]
switch { switch {
@ -3274,10 +3492,12 @@ func (b *builder) cgo(p *Package, cgoExe, obj string, pcCFLAGS, pcLDFLAGS, cgofi
// skip "-framework X" on Darwin // skip "-framework X" on Darwin
case goos == "darwin" && f == "-framework": case goos == "darwin" && f == "-framework":
i++ i++
// skip "*.{dylib,so,dll}" // skip "*.{dylib,so,dll,o,a}"
case strings.HasSuffix(f, ".dylib"), case strings.HasSuffix(f, ".dylib"),
strings.HasSuffix(f, ".so"), strings.HasSuffix(f, ".so"),
strings.HasSuffix(f, ".dll"): strings.HasSuffix(f, ".dll"),
strings.HasSuffix(f, ".o"),
strings.HasSuffix(f, ".a"):
// Remove any -fsanitize=foo flags. // Remove any -fsanitize=foo flags.
// Otherwise the compiler driver thinks that we are doing final link // Otherwise the compiler driver thinks that we are doing final link
// and links sanitizer runtime into the object file. But we are not doing // and links sanitizer runtime into the object file. But we are not doing
@ -3297,109 +3517,11 @@ func (b *builder) cgo(p *Package, cgoExe, obj string, pcCFLAGS, pcLDFLAGS, cgofi
i++ i++
} }
default: default:
bareLDFLAGS = append(bareLDFLAGS, f) ldflags = append(ldflags, f)
} }
} }
var staticLibs []string ldflags = append(ldflags, "-Wl,-r", "-nostdlib")
if goos == "windows" {
// libmingw32 and libmingwex have some inter-dependencies,
// so must use linker groups.
staticLibs = []string{"-Wl,--start-group", "-lmingwex", "-lmingw32", "-Wl,--end-group"}
}
cflags := stringList(cgoCPPFLAGS, cgoCFLAGS)
for _, cfile := range cfiles {
ofile := obj + cfile[:len(cfile)-1] + "o"
if err := b.gcc(p, ofile, cflags, obj+cfile); err != nil {
return nil, nil, err
}
linkobj = append(linkobj, ofile)
if !strings.HasSuffix(ofile, "_cgo_main.o") {
outObj = append(outObj, ofile)
}
}
for _, file := range gccfiles {
ofile := obj + cgoRe.ReplaceAllString(file[:len(file)-1], "_") + "o"
if err := b.gcc(p, ofile, cflags, file); err != nil {
return nil, nil, err
}
linkobj = append(linkobj, ofile)
outObj = append(outObj, ofile)
}
cxxflags := stringList(cgoCPPFLAGS, cgoCXXFLAGS)
for _, file := range gxxfiles {
// Append .o to the file, just in case the pkg has file.c and file.cpp
ofile := obj + cgoRe.ReplaceAllString(file, "_") + ".o"
if err := b.gxx(p, ofile, cxxflags, file); err != nil {
return nil, nil, err
}
linkobj = append(linkobj, ofile)
outObj = append(outObj, ofile)
}
for _, file := range mfiles {
// Append .o to the file, just in case the pkg has file.c and file.m
ofile := obj + cgoRe.ReplaceAllString(file, "_") + ".o"
if err := b.gcc(p, ofile, cflags, file); err != nil {
return nil, nil, err
}
linkobj = append(linkobj, ofile)
outObj = append(outObj, ofile)
}
fflags := stringList(cgoCPPFLAGS, cgoFFLAGS)
for _, file := range ffiles {
// Append .o to the file, just in case the pkg has file.c and file.f
ofile := obj + cgoRe.ReplaceAllString(file, "_") + ".o"
if err := b.gfortran(p, ofile, fflags, file); err != nil {
return nil, nil, err
}
linkobj = append(linkobj, ofile)
outObj = append(outObj, ofile)
}
linkobj = append(linkobj, p.SysoFiles...)
dynobj := obj + "_cgo_.o"
pie := (goarch == "arm" && goos == "linux") || goos == "android"
if pie { // we need to use -pie for Linux/ARM to get accurate imported sym
cgoLDFLAGS = append(cgoLDFLAGS, "-pie")
}
if err := b.gccld(p, dynobj, cgoLDFLAGS, linkobj); err != nil {
return nil, nil, err
}
if pie { // but we don't need -pie for normal cgo programs
cgoLDFLAGS = cgoLDFLAGS[0 : len(cgoLDFLAGS)-1]
}
if _, ok := buildToolchain.(gccgoToolchain); ok {
// we don't use dynimport when using gccgo.
return outGo, outObj, nil
}
// cgo -dynimport
importGo := obj + "_cgo_import.go"
cgoflags = []string{}
if p.Standard && p.ImportPath == "runtime/cgo" {
cgoflags = append(cgoflags, "-dynlinker") // record path to dynamic linker
}
if err := b.run(p.Dir, p.ImportPath, nil, buildToolExec, cgoExe, "-objdir", obj, "-dynpackage", p.Name, "-dynimport", dynobj, "-dynout", importGo, cgoflags); err != nil {
return nil, nil, err
}
outGo = append(outGo, importGo)
ofile := obj + "_all.o"
var gccObjs, nonGccObjs []string
for _, f := range outObj {
if strings.HasSuffix(f, ".o") {
gccObjs = append(gccObjs, f)
} else {
nonGccObjs = append(nonGccObjs, f)
}
}
ldflags := stringList(bareLDFLAGS, "-Wl,-r", "-nostdlib", staticLibs)
if b.gccSupportsNoPie() { if b.gccSupportsNoPie() {
ldflags = append(ldflags, "-no-pie") ldflags = append(ldflags, "-no-pie")
@ -3408,16 +3530,7 @@ func (b *builder) cgo(p *Package, cgoExe, obj string, pcCFLAGS, pcLDFLAGS, cgofi
// We are creating an object file, so we don't want a build ID. // We are creating an object file, so we don't want a build ID.
ldflags = b.disableBuildID(ldflags) ldflags = b.disableBuildID(ldflags)
if err := b.gccld(p, ofile, ldflags, gccObjs); err != nil { return b.gccld(p, ofile, ldflags, outObj)
return nil, nil, err
}
// NOTE(rsc): The importObj is a 5c/6c/8c object and on Windows
// must be processed before the gcc-generated objects.
// Put it first. https://golang.org/issue/2601
outObj = stringList(nonGccObjs, ofile)
return outGo, outObj, nil
} }
// Run SWIG on all SWIG input files. // Run SWIG on all SWIG input files.
@ -3570,7 +3683,7 @@ func (b *builder) swigIntSize(obj string) (intsize string, err error) {
// Run SWIG on one SWIG input file. // Run SWIG on one SWIG input file.
func (b *builder) swigOne(p *Package, file, obj string, pcCFLAGS []string, cxx bool, intgosize string) (outGo, outC string, err error) { func (b *builder) swigOne(p *Package, file, obj string, pcCFLAGS []string, cxx bool, intgosize string) (outGo, outC string, err error) {
cgoCPPFLAGS, cgoCFLAGS, cgoCXXFLAGS, _, _ := b.cflags(p, true) cgoCPPFLAGS, cgoCFLAGS, cgoCXXFLAGS, _, _ := b.cflags(p)
var cflags []string var cflags []string
if cxx { if cxx {
cflags = stringList(cgoCPPFLAGS, pcCFLAGS, cgoCXXFLAGS) cflags = stringList(cgoCPPFLAGS, pcCFLAGS, cgoCXXFLAGS)
@ -3633,7 +3746,7 @@ func (b *builder) swigOne(p *Package, file, obj string, pcCFLAGS []string, cxx b
b.showOutput(p.Dir, p.ImportPath, b.processOutput(out)) // swig warning b.showOutput(p.Dir, p.ImportPath, b.processOutput(out)) // swig warning
} }
return obj + goFile, obj + gccBase + gccExt, nil return goFile, obj + gccBase + gccExt, nil
} }
// disableBuildID adjusts a linker command line to avoid creating a // disableBuildID adjusts a linker command line to avoid creating a
@ -3682,7 +3795,11 @@ func instrumentInit() {
return return
} }
if buildRace && buildMSan { if buildRace && buildMSan {
fmt.Fprintf(os.Stderr, "go %s: may not use -race and -msan simultaneously", flag.Args()[0]) fmt.Fprintf(os.Stderr, "go %s: may not use -race and -msan simultaneously\n", flag.Args()[0])
os.Exit(2)
}
if buildMSan && (goos != "linux" || goarch != "amd64") {
fmt.Fprintf(os.Stderr, "-msan is not supported on %s/%s\n", goos, goarch)
os.Exit(2) os.Exit(2)
} }
if goarch != "amd64" || goos != "linux" && goos != "freebsd" && goos != "darwin" && goos != "windows" { if goarch != "amd64" || goos != "linux" && goos != "freebsd" && goos != "darwin" && goos != "windows" {

View file

@ -0,0 +1,44 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"os"
"reflect"
"testing"
)
func TestRemoveDevNull(t *testing.T) {
fi, err := os.Lstat(os.DevNull)
if err != nil {
t.Skip(err)
}
if fi.Mode().IsRegular() {
t.Errorf("Lstat(%s).Mode().IsRegular() = true; expected false", os.DevNull)
}
mayberemovefile(os.DevNull)
_, err = os.Lstat(os.DevNull)
if err != nil {
t.Errorf("mayberemovefile(%s) did remove it; oops", os.DevNull)
}
}
func TestSplitPkgConfigOutput(t *testing.T) {
for _, test := range []struct {
in []byte
want []string
}{
{[]byte(`-r:foo -L/usr/white\ space/lib -lfoo\ bar -lbar\ baz`), []string{"-r:foo", "-L/usr/white space/lib", "-lfoo bar", "-lbar baz"}},
{[]byte(`-lextra\ fun\ arg\\`), []string{`-lextra fun arg\`}},
{[]byte(`broken flag\`), []string{"broken", "flag"}},
{[]byte("\textra whitespace\r\n"), []string{"extra", "whitespace"}},
{[]byte(" \r\n "), nil},
} {
got := splitPkgConfigOutput(test.in)
if !reflect.DeepEqual(got, test.want) {
t.Errorf("splitPkgConfigOutput(%v) = %v; want %v", test.in, got, test.want)
}
}
}

View file

@ -40,7 +40,7 @@ func mkEnv() []envVar {
{"GOHOSTARCH", runtime.GOARCH}, {"GOHOSTARCH", runtime.GOARCH},
{"GOHOSTOS", runtime.GOOS}, {"GOHOSTOS", runtime.GOOS},
{"GOOS", goos}, {"GOOS", goos},
{"GOPATH", os.Getenv("GOPATH")}, {"GOPATH", buildContext.GOPATH},
{"GORACE", os.Getenv("GORACE")}, {"GORACE", os.Getenv("GORACE")},
{"GOROOT", goroot}, {"GOROOT", goroot},
{"GOTOOLDIR", toolDir}, {"GOTOOLDIR", toolDir},
@ -49,14 +49,25 @@ func mkEnv() []envVar {
{"TERM", "dumb"}, {"TERM", "dumb"},
} }
if goos != "plan9" { if gccgoBin != "" {
cmd := b.gccCmd(".") env = append(env, envVar{"GCCGO", gccgoBin})
env = append(env, envVar{"CC", cmd[0]}) } else {
env = append(env, envVar{"GOGCCFLAGS", strings.Join(cmd[3:], " ")}) env = append(env, envVar{"GCCGO", gccgoName})
cmd = b.gxxCmd(".")
env = append(env, envVar{"CXX", cmd[0]})
} }
switch goarch {
case "arm":
env = append(env, envVar{"GOARM", os.Getenv("GOARM")})
case "386":
env = append(env, envVar{"GO386", os.Getenv("GO386")})
}
cmd := b.gccCmd(".")
env = append(env, envVar{"CC", cmd[0]})
env = append(env, envVar{"GOGCCFLAGS", strings.Join(cmd[3:], " ")})
cmd = b.gxxCmd(".")
env = append(env, envVar{"CXX", cmd[0]})
if buildContext.CgoEnabled { if buildContext.CgoEnabled {
env = append(env, envVar{"CGO_ENABLED", "1"}) env = append(env, envVar{"CGO_ENABLED", "1"})
} else { } else {
@ -75,8 +86,24 @@ func findEnv(env []envVar, name string) string {
return "" return ""
} }
// extraEnvVars returns environment variables that should not leak into child processes.
func extraEnvVars() []envVar {
var b builder
b.init()
cppflags, cflags, cxxflags, fflags, ldflags := b.cflags(&Package{})
return []envVar{
{"PKG_CONFIG", b.pkgconfigCmd()},
{"CGO_CFLAGS", strings.Join(cflags, " ")},
{"CGO_CPPFLAGS", strings.Join(cppflags, " ")},
{"CGO_CXXFLAGS", strings.Join(cxxflags, " ")},
{"CGO_FFLAGS", strings.Join(fflags, " ")},
{"CGO_LDFLAGS", strings.Join(ldflags, " ")},
}
}
func runEnv(cmd *Command, args []string) { func runEnv(cmd *Command, args []string) {
env := mkEnv() env := newEnv
env = append(env, extraEnvVars()...)
if len(args) > 0 { if len(args) > 0 {
for _, name := range args { for _, name := range args {
fmt.Printf("%s\n", findEnv(env, name)) fmt.Printf("%s\n", findEnv(env, name))

View file

@ -25,7 +25,7 @@ var cmdGenerate = &Command{
Long: ` Long: `
Generate runs commands described by directives within existing Generate runs commands described by directives within existing
files. Those commands can run any process but the intent is to files. Those commands can run any process but the intent is to
create or update Go source files, for instance by running yacc. create or update Go source files.
Go generate is never run automatically by go build, go get, go test, Go generate is never run automatically by go build, go get, go test,
and so on. It must be run explicitly. and so on. It must be run explicitly.
@ -88,10 +88,10 @@ string xxx represents the command identified by the arguments. This
can be used to create aliases or to handle multiword generators. can be used to create aliases or to handle multiword generators.
For example, For example,
//go:generate -command yacc go tool yacc //go:generate -command foo go tool foo
specifies that the command "yacc" represents the generator specifies that the command "foo" represents the generator
"go tool yacc". "go tool foo".
Generate processes packages in the order given on the command line, Generate processes packages in the order given on the command line,
one at a time. If the command line lists .go files, they are treated one at a time. If the command line lists .go files, they are treated
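A minimal sketch of the -command alias in practice (tool and file names are made up): the alias is defined once and then reused by later directives in the same file.

package store

// Define "mockgen" once, then reuse the alias below.
//go:generate -command mockgen go run ./internal/mockgen
//go:generate mockgen -out store_mock.go Store
//go:generate mockgen -out cache_mock.go Cache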
@ -136,6 +136,8 @@ func init() {
} }
func runGenerate(cmd *Command, args []string) { func runGenerate(cmd *Command, args []string) {
ignoreImports = true
if generateRunFlag != "" { if generateRunFlag != "" {
var err error var err error
generateRunRE, err = regexp.Compile(generateRunFlag) generateRunRE, err = regexp.Compile(generateRunFlag)

View file

@ -43,11 +43,13 @@ The -u flag instructs get to use the network to update the named packages
and their dependencies. By default, get uses the network to check out and their dependencies. By default, get uses the network to check out
missing packages but does not use it to look for updates to existing packages. missing packages but does not use it to look for updates to existing packages.
The -v flag enables verbose progress and debug output.
Get also accepts build flags to control the installation. See 'go help build'. Get also accepts build flags to control the installation. See 'go help build'.
When checking out a new package, get creates the target directory When checking out a new package, get creates the target directory
GOPATH/src/<import-path>. If the GOPATH contains multiple entries, GOPATH/src/<import-path>. If the GOPATH contains multiple entries,
get uses the first one. See 'go help gopath'. get uses the first one. For more details see: 'go help gopath'.
When checking out or updating a package, get looks for a branch or tag When checking out or updating a package, get looks for a branch or tag
that matches the locally installed version of Go. The most important that matches the locally installed version of Go. The most important
@ -96,13 +98,31 @@ func runGet(cmd *Command, args []string) {
os.Setenv("GIT_TERMINAL_PROMPT", "0") os.Setenv("GIT_TERMINAL_PROMPT", "0")
} }
// Disable any ssh connection pooling by Git.
// If a Git subprocess forks a child into the background to cache a new connection,
// that child keeps stdout/stderr open. After the Git subprocess exits,
// os/exec expects to be able to read from the stdout/stderr pipe
// until EOF to get all the data that the Git subprocess wrote before exiting.
// The EOF doesn't come until the child exits too, because the child
// is holding the write end of the pipe.
// This is unfortunate, but it has come up at least twice
// (see golang.org/issue/13453 and golang.org/issue/16104)
// and confuses users when it does.
// If the user has explicitly set GIT_SSH or GIT_SSH_COMMAND,
// assume they know what they are doing and don't step on it.
// But default to turning off ControlMaster.
if os.Getenv("GIT_SSH") == "" && os.Getenv("GIT_SSH_COMMAND") == "" {
os.Setenv("GIT_SSH_COMMAND", "ssh -o ControlMaster=no")
}
	// Phase 1. Download/update.
	var stk importStack
	mode := 0
	if *getT {
		mode |= getTestDeps
	}
-	for _, arg := range downloadPaths(args) {
+	args = downloadPaths(args)
+	for _, arg := range args {
		download(arg, nil, &stk, mode)
	}
	exitIfErrors()
@@ -137,7 +157,7 @@ func runGet(cmd *Command, args []string) {
		return
	}
-	runInstall(cmd, args)
+	installPackages(args, true)
}
// downloadPaths prepares the list of paths to pass to download.
@@ -177,7 +197,7 @@ var downloadCache = map[string]bool{}
// downloadRootCache records the version control repository
// root directories we have already considered during the download.
-// For example, all the packages in the code.google.com/p/codesearch repo
+// For example, all the packages in the github.com/google/codesearch repo
// share the same root (the directory for that path), and we only need
// to run the hg commands to consider each repository once.
var downloadRootCache = map[string]bool{}
@@ -185,6 +205,10 @@ var downloadRootCache = map[string]bool{}
// download runs the download half of the get command
// for the package named by the argument.
func download(arg string, parent *Package, stk *importStack, mode int) {
+	if mode&useVendor != 0 {
+		// Caller is responsible for expanding vendor paths.
+		panic("internal error: download mode has useVendor set")
+	}
	load := func(path string, mode int) *Package {
		if parent == nil {
			return loadPackage(path, stk)
@@ -295,33 +319,43 @@ func download(arg string, parent *Package, stk *importStack, mode int) {
	}
	// Process dependencies, now that we know what they are.
-	for _, path := range p.Imports {
+	imports := p.Imports
+	if mode&getTestDeps != 0 {
+		// Process test dependencies when -t is specified.
+		// (But don't get test dependencies for test dependencies:
+		// we always pass mode 0 to the recursive calls below.)
+		imports = stringList(imports, p.TestImports, p.XTestImports)
+	}
+	for i, path := range imports {
		if path == "C" {
			continue
		}
-		// Don't get test dependencies recursively.
-		// Imports is already vendor-expanded.
+		// Fail fast on import naming full vendor path.
+		// Otherwise expand path as needed for test imports.
+		// Note that p.Imports can have additional entries beyond p.build.Imports.
+		orig := path
+		if i < len(p.build.Imports) {
+			orig = p.build.Imports[i]
+		}
+		if j, ok := findVendor(orig); ok {
+			stk.push(path)
+			err := &PackageError{
+				ImportStack: stk.copy(),
+				Err:         "must be imported as " + path[j+len("vendor/"):],
+			}
+			stk.pop()
+			errorf("%s", err)
+			continue
+		}
+		// If this is a test import, apply vendor lookup now.
+		// We cannot pass useVendor to download, because
+		// download does caching based on the value of path,
+		// so it must be the fully qualified path already.
+		if i >= len(p.Imports) {
+			path = vendoredImportPath(p, path)
+		}
		download(path, p, stk, 0)
	}
-	if mode&getTestDeps != 0 {
-		// Process test dependencies when -t is specified.
-		// (Don't get test dependencies for test dependencies.)
-		// We pass useVendor here because p.load does not
-		// vendor-expand TestImports and XTestImports.
-		// The call to loadImport inside download needs to do that.
-		for _, path := range p.TestImports {
-			if path == "C" {
-				continue
-			}
-			download(path, p, stk, useVendor)
-		}
-		for _, path := range p.XTestImports {
-			if path == "C" {
-				continue
-			}
-			download(path, p, stk, useVendor)
-		}
-	}
	if isWildcard {
		stk.pop()
@@ -368,7 +402,7 @@ func downloadPackage(p *Package) error {
			repo = resolved
		}
	}
-	if remote != repo && p.ImportComment != "" {
+	if remote != repo && rr.isCustom {
		return fmt.Errorf("%s is a custom import path for %s, but %s is checked out from %s", rr.root, repo, dir, remote)
	}
}
@@ -391,12 +425,16 @@ func downloadPackage(p *Package) error {
	// Package not found. Put in first directory of $GOPATH.
	list := filepath.SplitList(buildContext.GOPATH)
	if len(list) == 0 {
-		return fmt.Errorf("cannot download, $GOPATH not set. For more details see: go help gopath")
+		return fmt.Errorf("cannot download, $GOPATH not set. For more details see: 'go help gopath'")
	}
	// Guard against people setting GOPATH=$GOROOT.
	if list[0] == goroot {
-		return fmt.Errorf("cannot download, $GOPATH must not be set to $GOROOT. For more details see: go help gopath")
+		return fmt.Errorf("cannot download, $GOPATH must not be set to $GOROOT. For more details see: 'go help gopath'")
	}
+	if _, err := os.Stat(filepath.Join(list[0], "src/cmd/go/alldocs.go")); err == nil {
+		return fmt.Errorf("cannot download, %s is a GOROOT, not a GOPATH. For more details see: 'go help gopath'", list[0])
+	}
+	p.build.Root = list[0]
	p.build.SrcRoot = filepath.Join(list[0], "src")
	p.build.PkgRoot = filepath.Join(list[0], "pkg")
}
@@ -425,11 +463,19 @@ func downloadPackage(p *Package) error {
	if _, err := os.Stat(root); err == nil {
		return fmt.Errorf("%s exists but %s does not - stale checkout?", root, meta)
	}
+	_, err := os.Stat(p.build.Root)
+	gopathExisted := err == nil
	// Some version control tools require the parent of the target to exist.
	parent, _ := filepath.Split(root)
	if err = os.MkdirAll(parent, 0777); err != nil {
		return err
	}
+	if buildV && !gopathExisted && p.build.Root == buildContext.GOPATH {
+		fmt.Fprintf(os.Stderr, "created GOPATH=%s; see 'go help gopath'\n", p.build.Root)
+	}
	if err = vcs.create(root, repo); err != nil {
		return err
	}

File diff suppressed because it is too large.

@@ -5,6 +5,7 @@
package main
import (
+	"internal/testenv"
	"io/ioutil"
	"os"
	"os/exec"
@@ -45,7 +46,7 @@ func TestAbsolutePath(t *testing.T) {
	noVolume := file[len(filepath.VolumeName(file)):]
	wrongPath := filepath.Join(dir, noVolume)
-	output, err := exec.Command("go", "build", noVolume).CombinedOutput()
+	output, err := exec.Command(testenv.GoToolPath(t), "build", noVolume).CombinedOutput()
	if err == nil {
		t.Fatal("build should fail")
	}


@@ -42,7 +42,7 @@ denotes the package in that directory.
Otherwise, the import path P denotes the package found in
the directory DIR/src/P for some DIR listed in the GOPATH
-environment variable (see 'go help gopath').
+environment variable (For more details see: 'go help gopath').
If no import paths are given, the action applies to the
package in the current directory.
@@ -62,6 +62,9 @@ Go library.
- "cmd" expands to the Go repository's commands and their
  internal libraries.
+Import paths beginning with "cmd/" only match source code in
+the Go repository.
An import path is a pattern if it includes one or more "..." wildcards,
each of which can match any string, including the empty string and
strings containing slashes. Such a pattern expands to all package
@@ -102,10 +105,10 @@ var helpImportPath = &Command{
	Short: "import path syntax",
	Long: `
-An import path (see 'go help packages') denotes a package
-stored in the local file system. In general, an import path denotes
-either a standard package (such as "unicode/utf8") or a package
-found in one of the work spaces (see 'go help gopath').
+An import path (see 'go help packages') denotes a package stored in the local
+file system. In general, an import path denotes either a standard package (such
+as "unicode/utf8") or a package found in one of the work spaces (For more
+details see: 'go help gopath').
Relative import paths
@@ -197,6 +200,11 @@ When a version control system supports multiple protocols,
each is tried in turn when downloading. For example, a Git
download tries https://, then git+ssh://.
+By default, downloads are restricted to known secure protocols
+(e.g. https, ssh). To override this setting for Git downloads, the
+GIT_ALLOW_PROTOCOL environment variable can be set (For more details see:
+'go help environment').
If the import path is not a known code hosting site and also lacks a
version control qualifier, the go tool attempts to fetch the import
over https/http and looks for a <meta> tag in the document's HTML
@@ -237,8 +245,8 @@ the go tool will verify that https://example.org/?go-get=1 contains the
same meta tag and then git clone https://code.org/r/p/exproj into
GOPATH/src/example.org.
-New downloaded packages are written to the first directory
-listed in the GOPATH environment variable (see 'go help gopath').
+New downloaded packages are written to the first directory listed in the GOPATH
+environment variable (For more details see: 'go help gopath').
The go command attempts to download the version of the
package appropriate for the Go release being used.
@@ -281,8 +289,13 @@ On Unix, the value is a colon-separated string.
On Windows, the value is a semicolon-separated string.
On Plan 9, the value is a list.
-GOPATH must be set to get, build and install packages outside the
-standard Go tree.
+If the environment variable is unset, GOPATH defaults
+to a subdirectory named "go" in the user's home directory
+($HOME/go on Unix, %USERPROFILE%\go on Windows),
+unless that directory holds a Go distribution.
+Run "go env GOPATH" to see the current GOPATH.
+See https://golang.org/wiki/SettingGOPATH to set a custom GOPATH.
Each directory listed in GOPATH must have a prescribed structure:
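The default-GOPATH rule documented just above can be illustrated in a few lines of Go. This is only a sketch of the documented behaviour (home directory plus "go"), not the go command's own implementation; the helper name defaultGOPATH is invented here, and the real tool additionally refuses a default that points at a Go distribution.

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"runtime"
)

// defaultGOPATH mirrors the documented default: $HOME/go on Unix,
// %USERPROFILE%\go on Windows, and empty if no home directory is known.
func defaultGOPATH() string {
	home := os.Getenv("HOME")
	if runtime.GOOS == "windows" {
		home = os.Getenv("USERPROFILE")
	}
	if home == "" {
		return ""
	}
	return filepath.Join(home, "go")
}

func main() {
	// An explicitly set GOPATH still wins, as before.
	if gopath := os.Getenv("GOPATH"); gopath != "" {
		fmt.Println(gopath)
		return
	}
	fmt.Println(defaultGOPATH())
}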
@@ -310,9 +323,9 @@ of DIR/bin. GOBIN must be an absolute path.
Here's an example directory layout:
-    GOPATH=/home/user/gocode
+    GOPATH=/home/user/go
-    /home/user/gocode/
+    /home/user/go/
        src/
            foo/
                bar/ (go code in package bar)
@@ -338,7 +351,7 @@ Code in or below a directory named "internal" is importable only
by code in the directory tree rooted at the parent of "internal".
Here's an extended version of the directory layout above:
-    /home/user/gocode/
+    /home/user/go/
        src/
            crash/
                bang/ (go code in package bang)
@@ -376,7 +389,7 @@ Here's the example from the previous section,
but with the "internal" directory renamed to "vendor"
and a new foo/vendor/crash/bang directory added:
-    /home/user/gocode/
+    /home/user/go/
        src/
            crash/
                bang/ (go code in package bang)
@@ -439,7 +452,7 @@ General-purpose environment variables:
        The operating system for which to compile code.
        Examples are linux, darwin, windows, netbsd.
    GOPATH
-        See 'go help gopath'.
+        For more details see: 'go help gopath'.
    GORACE
        Options for the race detector.
        See https://golang.org/doc/articles/race_detector.html.
@@ -461,10 +474,15 @@ Environment variables for use with cgo:
    CGO_CXXFLAGS
        Flags that cgo will pass to the compiler when compiling
        C++ code.
+   CGO_FFLAGS
+       Flags that cgo will pass to the compiler when compiling
+       Fortran code.
    CGO_LDFLAGS
        Flags that cgo will pass to the compiler when linking.
    CXX
        The command to use to compile C++ code.
+   PKG_CONFIG
+       Path to pkg-config tool.
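As a small illustration of where the cgo variables above come into play, a file like the following consumes pkg-config metadata; the zlib package name is only an example, and PKG_CONFIG, CGO_CFLAGS, and friends merely override how such directives are resolved. This is a hedged sketch, not part of the diff.

package zversion

// #cgo pkg-config: zlib
// #include <zlib.h>
import "C"

// Version reports the linked zlib version string via cgo.
func Version() string {
	return C.GoString(C.zlibVersion())
}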
Architecture-specific environment variables:
@@ -486,6 +504,10 @@ Special-purpose environment variables:
        Whether the linker should use external linking mode
        when using -linkmode=auto with code that uses cgo.
        Set to 0 to disable external linking mode, 1 to enable it.
+   GIT_ALLOW_PROTOCOL
+       Defined by Git. A colon-separated list of schemes that are allowed to be used
+       with git fetch/clone. If set, any scheme not explicitly mentioned will be
+       considered insecure by 'go get'.
`,
}
@@ -577,5 +599,9 @@ are:
        Build the listed main packages and everything they import into
        position independent executables (PIE). Packages not named
        main are ignored.
+   -buildmode=plugin
+       Build the listed main packages, plus all packages that they
+       import, into a Go plugin. Packages not named main are ignored.
`,
}
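A minimal sketch of how the new -buildmode=plugin output might be consumed, using the standard library plugin package (Linux-only in Go 1.8). The file name plugin.so and the exported variable V are assumptions made for the illustration, not names taken from the diff.

package main

import (
	"fmt"
	"plugin"
)

func main() {
	// Open a plugin previously built with: go build -buildmode=plugin
	p, err := plugin.Open("plugin.so")
	if err != nil {
		panic(err)
	}
	// Look up an exported symbol; V is assumed to be declared as
	// `var V int` in the plugin's main package.
	sym, err := p.Lookup("V")
	if err != nil {
		panic(err)
	}
	*sym.(*int) = 42
	fmt.Println("set plugin variable V to", *sym.(*int))
}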


@@ -12,6 +12,7 @@
package main
import (
+	"cmd/internal/browser"
	"crypto/tls"
	"fmt"
	"io"
@@ -32,6 +33,7 @@ var httpClient = http.DefaultClient
var impatientInsecureHTTPClient = &http.Client{
	Timeout: 5 * time.Second,
	Transport: &http.Transport{
+		Proxy: http.ProxyFromEnvironment,
		TLSClientConfig: &tls.Config{
			InsecureSkipVerify: true,
		},
@@ -113,3 +115,6 @@ func httpsOrHTTP(importPath string, security securityMode) (urlStr string, body
	}
	return urlStr, res.Body, nil
}
+func queryEscape(s string) string { return url.QueryEscape(s) }
+func openBrowser(url string) bool { return browser.Open(url) }


@@ -59,6 +59,8 @@ syntax of package template. The default output is equivalent to -f
    SwigFiles    []string // .swig files
    SwigCXXFiles []string // .swigcxx files
    SysoFiles    []string // .syso object files to add to archive
+   TestGoFiles  []string // _test.go files in package
+   XTestGoFiles []string // _test.go files outside package
    // Cgo directives
    CgoCFLAGS    []string // cgo: flags for C compiler
@@ -69,20 +71,23 @@ syntax of package template. The default output is equivalent to -f
    CgoPkgConfig []string // cgo: pkg-config names
    // Dependency information
    Imports      []string // import paths used by this package
    Deps         []string // all (recursively) imported dependencies
+   TestImports  []string // imports from TestGoFiles
+   XTestImports []string // imports from XTestGoFiles
    // Error information
    Incomplete   bool            // this package or a dependency has an error
    Error        *PackageError   // error loading package
    DepsErrors   []*PackageError // errors loading dependencies
-   TestGoFiles  []string // _test.go files in package
-   TestImports  []string // imports from TestGoFiles
-   XTestGoFiles []string // _test.go files outside package
-   XTestImports []string // imports from XTestGoFiles
}
+Packages stored in vendor directories report an ImportPath that includes the
+path to the vendor directory (for example, "d/vendor/p" instead of "p"),
+so that the ImportPath uniquely identifies a given copy of a package.
+The Imports, Deps, TestImports, and XTestImports lists also contain these
+expanded imports paths. See golang.org/s/go15vendor for more about vendoring.
The error information, if any, is
    type PackageError struct {
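The fields listed above are what 'go list -json' emits; the sketch below shows one way a caller might decode a small subset of them. The struct here deliberately names only a few fields and is not the full definition used by the go command.

package main

import (
	"encoding/json"
	"fmt"
	"os/exec"
)

// listPackage holds just the handful of fields this sketch cares about;
// the real 'go list -json' output carries many more.
type listPackage struct {
	ImportPath   string
	Deps         []string
	TestImports  []string
	XTestImports []string
}

func main() {
	out, err := exec.Command("go", "list", "-json", "fmt").Output()
	if err != nil {
		panic(err)
	}
	var p listPackage
	if err := json.Unmarshal(out, &p); err != nil {
		panic(err)
	}
	fmt.Println(p.ImportPath, "depends on", len(p.Deps), "packages")
}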


@@ -79,6 +79,7 @@ var commands = []*Command{
	cmdClean,
	cmdDoc,
	cmdEnv,
+	cmdBug,
	cmdFix,
	cmdFmt,
	cmdGenerate,
@@ -114,6 +115,7 @@ func setExitStatus(n int) {
}
var origEnv []string
+var newEnv []envVar
func main() {
	_ = go11tag
@@ -134,7 +136,7 @@ func main() {
	// Diagnose common mistake: GOPATH==GOROOT.
	// This setting is equivalent to not setting GOPATH at all,
	// which is not what most people want when they do it.
-	if gopath := os.Getenv("GOPATH"); gopath == runtime.GOROOT() {
+	if gopath := buildContext.GOPATH; gopath == runtime.GOROOT() {
		fmt.Fprintf(os.Stderr, "warning: GOPATH set to GOROOT (%s) has no effect\n", gopath)
	} else {
		for _, p := range filepath.SplitList(gopath) {
@@ -146,7 +148,7 @@ func main() {
			os.Exit(2)
		}
		if !filepath.IsAbs(p) {
-			fmt.Fprintf(os.Stderr, "go: GOPATH entry is relative; must be absolute path: %q.\nRun 'go help gopath' for usage.\n", p)
+			fmt.Fprintf(os.Stderr, "go: GOPATH entry is relative; must be absolute path: %q.\nFor more details see: 'go help gopath'\n", p)
			os.Exit(2)
		}
	}
@@ -163,7 +165,8 @@ func main() {
	// but in practice there might be skew
	// This makes sure we all agree.
	origEnv = os.Environ()
-	for _, env := range mkEnv() {
+	newEnv = mkEnv()
+	for _, env := range newEnv {
		if os.Getenv(env.name) != env.value {
			os.Setenv(env.name, env.value)
		}


@@ -24,6 +24,8 @@ import (
	"unicode"
)
+var ignoreImports bool // control whether we ignore imports in packages
// A Package describes a single package found in a directory.
type Package struct {
	// Note: These fields are part of the go command's public API.
@@ -180,11 +182,18 @@ func (p *Package) copyBuild(pp *build.Package) {
	p.CgoCXXFLAGS = pp.CgoCXXFLAGS
	p.CgoLDFLAGS = pp.CgoLDFLAGS
	p.CgoPkgConfig = pp.CgoPkgConfig
-	p.Imports = pp.Imports
+	// We modify p.Imports in place, so make copy now.
+	p.Imports = make([]string, len(pp.Imports))
+	copy(p.Imports, pp.Imports)
	p.TestGoFiles = pp.TestGoFiles
	p.TestImports = pp.TestImports
	p.XTestGoFiles = pp.XTestGoFiles
	p.XTestImports = pp.XTestImports
+	if ignoreImports {
+		p.Imports = nil
+		p.TestImports = nil
+		p.XTestImports = nil
+	}
}
// isStandardImportPath reports whether $GOROOT/src/path should be considered
@@ -338,62 +347,98 @@ func loadImport(path, srcDir string, parent *Package, stk *importStack, importPo
		importPath = path
	}
-	if p := packageCache[importPath]; p != nil {
-		if perr := disallowInternal(srcDir, p, stk); perr != p {
-			return perr
-		}
-		if mode&useVendor != 0 {
-			if perr := disallowVendor(srcDir, origPath, p, stk); perr != p {
-				return perr
-			}
-		}
-		return reusePackage(p, stk)
-	}
-	p := new(Package)
-	p.local = isLocal
-	p.ImportPath = importPath
-	packageCache[importPath] = p
-	// Load package.
-	// Import always returns bp != nil, even if an error occurs,
-	// in order to return partial information.
-	//
-	// TODO: After Go 1, decide when to pass build.AllowBinary here.
-	// See issue 3268 for mistakes to avoid.
-	buildMode := build.ImportComment
-	if mode&useVendor == 0 || path != origPath {
-		// Not vendoring, or we already found the vendored path.
-		buildMode |= build.IgnoreVendor
-	}
-	bp, err := buildContext.Import(path, srcDir, buildMode)
-	bp.ImportPath = importPath
-	if gobin != "" {
-		bp.BinDir = gobin
-	}
-	if err == nil && !isLocal && bp.ImportComment != "" && bp.ImportComment != path &&
-		!strings.Contains(path, "/vendor/") && !strings.HasPrefix(path, "vendor/") {
-		err = fmt.Errorf("code in directory %s expects import %q", bp.Dir, bp.ImportComment)
-	}
-	p.load(stk, bp, err)
-	if p.Error != nil && p.Error.Pos == "" && len(importPos) > 0 {
-		pos := importPos[0]
-		pos.Filename = shortPath(pos.Filename)
-		p.Error.Pos = pos.String()
-	}
-	if perr := disallowInternal(srcDir, p, stk); perr != p {
-		return perr
-	}
-	if mode&useVendor != 0 {
-		if perr := disallowVendor(srcDir, origPath, p, stk); perr != p {
-			return perr
-		}
-	}
-	return p
-}
+	p := packageCache[importPath]
+	if p != nil {
+		p = reusePackage(p, stk)
+	} else {
+		p = new(Package)
+		p.local = isLocal
+		p.ImportPath = importPath
+		packageCache[importPath] = p
+		// Load package.
+		// Import always returns bp != nil, even if an error occurs,
+		// in order to return partial information.
+		//
+		// TODO: After Go 1, decide when to pass build.AllowBinary here.
+		// See issue 3268 for mistakes to avoid.
+		buildMode := build.ImportComment
+		if mode&useVendor == 0 || path != origPath {
+			// Not vendoring, or we already found the vendored path.
+			buildMode |= build.IgnoreVendor
+		}
+		bp, err := buildContext.Import(path, srcDir, buildMode)
+		bp.ImportPath = importPath
+		if gobin != "" {
+			bp.BinDir = gobin
+		}
+		if err == nil && !isLocal && bp.ImportComment != "" && bp.ImportComment != path &&
+			!strings.Contains(path, "/vendor/") && !strings.HasPrefix(path, "vendor/") {
+			err = fmt.Errorf("code in directory %s expects import %q", bp.Dir, bp.ImportComment)
+		}
+		p.load(stk, bp, err)
+		if p.Error != nil && p.Error.Pos == "" {
+			p = setErrorPos(p, importPos)
+		}
+		if origPath != cleanImport(origPath) {
+			p.Error = &PackageError{
+				ImportStack: stk.copy(),
+				Err:         fmt.Sprintf("non-canonical import path: %q should be %q", origPath, pathpkg.Clean(origPath)),
+			}
+			p.Incomplete = true
+		}
+	}
+	// Checked on every import because the rules depend on the code doing the importing.
+	if perr := disallowInternal(srcDir, p, stk); perr != p {
+		return setErrorPos(perr, importPos)
+	}
+	if mode&useVendor != 0 {
+		if perr := disallowVendor(srcDir, origPath, p, stk); perr != p {
+			return setErrorPos(perr, importPos)
+		}
+	}
+	if p.Name == "main" && parent != nil && parent.Dir != p.Dir {
+		perr := *p
+		perr.Error = &PackageError{
+			ImportStack: stk.copy(),
+			Err:         fmt.Sprintf("import %q is a program, not an importable package", path),
+		}
+		return setErrorPos(&perr, importPos)
+	}
+	if p.local && parent != nil && !parent.local {
+		perr := *p
+		perr.Error = &PackageError{
+			ImportStack: stk.copy(),
+			Err:         fmt.Sprintf("local import %q in non-local package", path),
+		}
+		return setErrorPos(&perr, importPos)
+	}
+	return p
+}
+func setErrorPos(p *Package, importPos []token.Position) *Package {
+	if len(importPos) > 0 {
+		pos := importPos[0]
+		pos.Filename = shortPath(pos.Filename)
+		p.Error.Pos = pos.String()
+	}
+	return p
+}
+func cleanImport(path string) string {
+	orig := path
+	path = pathpkg.Clean(path)
+	if strings.HasPrefix(orig, "./") && path != ".." && path != "." && !strings.HasPrefix(path, "../") {
+		path = "./" + path
+	}
+	return path
+}
var isDirCache = map[string]bool{}
func isDir(path string) bool {
@@ -419,13 +464,26 @@ func vendoredImportPath(parent *Package, path string) (found string) {
	dir := filepath.Clean(parent.Dir)
	root := filepath.Join(parent.Root, "src")
-	if !hasFilePathPrefix(dir, root) {
+	if !hasFilePathPrefix(dir, root) || parent.ImportPath != "command-line-arguments" && filepath.Join(root, parent.ImportPath) != dir {
		// Look for symlinks before reporting error.
		dir = expandPath(dir)
		root = expandPath(root)
	}
-	if !hasFilePathPrefix(dir, root) || len(dir) <= len(root) || dir[len(root)] != filepath.Separator {
-		fatalf("invalid vendoredImportPath: dir=%q root=%q separator=%q", dir, root, string(filepath.Separator))
+	if !hasFilePathPrefix(dir, root) || len(dir) <= len(root) || dir[len(root)] != filepath.Separator || parent.ImportPath != "command-line-arguments" && !parent.local && filepath.Join(root, parent.ImportPath) != dir {
+		fatalf("unexpected directory layout:\n"+
+			"	import path: %s\n"+
+			"	root: %s\n"+
+			"	dir: %s\n"+
+			"	expand root: %s\n"+
+			"	expand dir: %s\n"+
+			"	separator: %s",
+			parent.ImportPath,
+			filepath.Join(parent.Root, "src"),
+			filepath.Clean(parent.Dir),
+			root,
+			dir,
+			string(filepath.Separator))
	}
	vpath := "vendor/" + path
@@ -523,6 +581,14 @@ func disallowInternal(srcDir string, p *Package, stk *importStack) *Package {
		return p
	}
+	// The generated 'testmain' package is allowed to access testing/internal/...,
+	// as if it were generated into the testing directory tree
+	// (it's actually in a temporary directory outside any Go tree).
+	// This cleans up a former kludge in passing functionality to the testing package.
+	if strings.HasPrefix(p.ImportPath, "testing/internal") && len(*stk) >= 2 && (*stk)[len(*stk)-2] == "testmain" {
+		return p
+	}
	// We can't check standard packages with gccgo.
	if buildContext.Compiler == "gccgo" && p.Standard {
		return p
@@ -700,24 +766,23 @@ const (
// goTools is a map of Go program import path to install target directory.
var goTools = map[string]targetDir{
	"cmd/addr2line": toTool,
	"cmd/api":       toTool,
	"cmd/asm":       toTool,
	"cmd/compile":   toTool,
	"cmd/cgo":       toTool,
	"cmd/cover":     toTool,
	"cmd/dist":      toTool,
	"cmd/doc":       toTool,
	"cmd/fix":       toTool,
	"cmd/link":      toTool,
	"cmd/newlink":   toTool,
	"cmd/nm":        toTool,
	"cmd/objdump":   toTool,
	"cmd/pack":      toTool,
	"cmd/pprof":     toTool,
	"cmd/trace":     toTool,
	"cmd/vet":       toTool,
-	"cmd/yacc":      toTool,
	"code.google.com/p/go.tools/cmd/cover": stalePath,
	"code.google.com/p/go.tools/cmd/godoc": stalePath,
	"code.google.com/p/go.tools/cmd/vet":   stalePath,
@@ -792,7 +857,7 @@ func (p *Package) load(stk *importStack, bp *build.Package, err error) *Package
	useBindir := p.Name == "main"
	if !p.Standard {
		switch buildBuildmode {
-		case "c-archive", "c-shared":
+		case "c-archive", "c-shared", "plugin":
			useBindir = false
		}
	}
@@ -867,11 +932,25 @@ func (p *Package) load(stk *importStack, bp *build.Package, err error) *Package
		importPaths = append(importPaths, "syscall")
	}
-	// Currently build modes c-shared, pie, and -linkshared force
-	// external linking mode, and external linking mode forces an
-	// import of runtime/cgo.
-	if p.Name == "main" && !p.Goroot && (buildBuildmode == "c-shared" || buildBuildmode == "pie" || buildLinkshared) {
-		importPaths = append(importPaths, "runtime/cgo")
+	if buildContext.CgoEnabled && p.Name == "main" && !p.Goroot {
+		// Currently build modes c-shared, pie (on systems that do not
+		// support PIE with internal linking mode), plugin, and
+		// -linkshared force external linking mode, as of course does
+		// -ldflags=-linkmode=external. External linking mode forces
+		// an import of runtime/cgo.
+		pieCgo := buildBuildmode == "pie" && (buildContext.GOOS != "linux" || buildContext.GOARCH != "amd64")
+		linkmodeExternal := false
+		for i, a := range buildLdflags {
+			if a == "-linkmode=external" {
+				linkmodeExternal = true
+			}
+			if a == "-linkmode" && i+1 < len(buildLdflags) && buildLdflags[i+1] == "external" {
+				linkmodeExternal = true
+			}
+		}
+		if buildBuildmode == "c-shared" || buildBuildmode == "plugin" || pieCgo || buildLinkshared || linkmodeExternal {
+			importPaths = append(importPaths, "runtime/cgo")
+		}
	}
	// Everything depends on runtime, except runtime, its internal
@@ -891,6 +970,10 @@ func (p *Package) load(stk *importStack, bp *build.Package, err error) *Package
		if p.Name == "main" && goarch == "arm" {
			importPaths = append(importPaths, "math")
		}
+		// In coverage atomic mode everything depends on sync/atomic.
+		if testCoverMode == "atomic" && (!p.Standard || (p.ImportPath != "runtime/cgo" && p.ImportPath != "runtime/race" && p.ImportPath != "sync/atomic")) {
+			importPaths = append(importPaths, "sync/atomic")
+		}
	}
	// Runtime and its internal packages depend on runtime/internal/sys,
@@ -953,6 +1036,16 @@ func (p *Package) load(stk *importStack, bp *build.Package, err error) *Package
	// Build list of imported packages and full dependency list.
	imports := make([]*Package, 0, len(p.Imports))
	deps := make(map[string]*Package)
+	save := func(path string, p1 *Package) {
+		// The same import path could produce an error or not,
+		// depending on what tries to import it.
+		// Prefer to record entries with errors, so we can report them.
+		p0 := deps[path]
+		if p0 == nil || p1.Error != nil && (p0.Error == nil || len(p0.Error.ImportStack) > len(p1.Error.ImportStack)) {
+			deps[path] = p1
+		}
+	}
	for i, path := range importPaths {
		if path == "C" {
			continue
@@ -961,28 +1054,6 @@ func (p *Package) load(stk *importStack, bp *build.Package, err error) *Package
		if !reqStdPkgSrc && p1.Standard {
			continue
		}
-		if p1.Name == "main" {
-			p.Error = &PackageError{
-				ImportStack: stk.copy(),
-				Err:         fmt.Sprintf("import %q is a program, not an importable package", path),
-			}
-			pos := p.build.ImportPos[path]
-			if len(pos) > 0 {
-				p.Error.Pos = pos[0].String()
-			}
-		}
-		if p1.local {
-			if !p.local && p.Error == nil {
-				p.Error = &PackageError{
-					ImportStack: stk.copy(),
-					Err:         fmt.Sprintf("local import %q in non-local package", path),
-				}
-				pos := p.build.ImportPos[path]
-				if len(pos) > 0 {
-					p.Error.Pos = pos[0].String()
-				}
-			}
-		}
		if p.Standard && p.Error == nil && !p1.Standard && p1.Error == nil {
			p.Error = &PackageError{
				ImportStack: stk.copy(),
@@ -999,15 +1070,11 @@ func (p *Package) load(stk *importStack, bp *build.Package, err error) *Package
		if i < len(p.Imports) {
			p.Imports[i] = path
		}
-		deps[path] = p1
+		save(path, p1)
		imports = append(imports, p1)
		for _, dep := range p1.deps {
-			// The same import path could produce an error or not,
-			// depending on what tries to import it.
-			// Prefer to record entries with errors, so we can report them.
-			if deps[dep.ImportPath] == nil || dep.Error != nil {
-				deps[dep.ImportPath] = dep
-			}
+			save(dep.ImportPath, dep)
		}
		if p1.Incomplete {
			p.Incomplete = true


@@ -135,28 +135,11 @@ const testFlag2 = `
By default, no benchmarks run. To run all benchmarks,
use '-bench .' or '-bench=.'.
-   -benchmem
-       Print memory allocation statistics for benchmarks.
    -benchtime t
        Run enough iterations of each benchmark to take t, specified
        as a time.Duration (for example, -benchtime 1h30s).
        The default is 1 second (1s).
-   -blockprofile block.out
-       Write a goroutine blocking profile to the specified file
-       when all tests are complete.
-       Writes test binary as -c would.
-   -blockprofilerate n
-       Control the detail provided in goroutine blocking profiles by
-       calling runtime.SetBlockProfileRate with n.
-       See 'go doc runtime.SetBlockProfileRate'.
-       The profiler aims to sample, on average, one blocking event every
-       n nanoseconds the program spends blocked. By default,
-       if -test.blockprofile is set without this flag, all blocking events
-       are recorded, equivalent to -test.blockprofilerate=1.
    -count n
        Run each test and benchmark n times (default 1).
        If -cpu is set, run n times for each GOMAXPROCS value.
@@ -182,33 +165,11 @@ const testFlag2 = `
        Packages are specified as import paths.
        Sets -cover.
-   -coverprofile cover.out
-       Write a coverage profile to the file after all tests have passed.
-       Sets -cover.
    -cpu 1,2,4
        Specify a list of GOMAXPROCS values for which the tests or
        benchmarks should be executed. The default is the current value
        of GOMAXPROCS.
-   -cpuprofile cpu.out
-       Write a CPU profile to the specified file before exiting.
-       Writes test binary as -c would.
-   -memprofile mem.out
-       Write a memory profile to the file after all tests have passed.
-       Writes test binary as -c would.
-   -memprofilerate n
-       Enable more precise (and expensive) memory profiles by setting
-       runtime.MemProfileRate. See 'go doc runtime.MemProfileRate'.
-       To profile all memory allocations, use -test.memprofilerate=1
-       and pass --alloc_space flag to the pprof tool.
-   -outputdir directory
-       Place output files from profiling in the specified directory,
-       by default the directory in which "go test" is running.
    -parallel n
        Allow parallel execution of test functions that call t.Parallel.
        The value of this flag is the maximum number of tests to run
@@ -234,13 +195,64 @@ const testFlag2 = `
        If a test runs longer than t, panic.
        The default is 10 minutes (10m).
-   -trace trace.out
-       Write an execution trace to the specified file before exiting.
    -v
        Verbose output: log all tests as they are run. Also print all
        text from Log and Logf calls even if the test succeeds.
+The following flags are also recognized by 'go test' and can be used to
+profile the tests during execution:
+   -benchmem
+       Print memory allocation statistics for benchmarks.
+   -blockprofile block.out
+       Write a goroutine blocking profile to the specified file
+       when all tests are complete.
+       Writes test binary as -c would.
+   -blockprofilerate n
+       Control the detail provided in goroutine blocking profiles by
+       calling runtime.SetBlockProfileRate with n.
+       See 'go doc runtime.SetBlockProfileRate'.
+       The profiler aims to sample, on average, one blocking event every
+       n nanoseconds the program spends blocked. By default,
+       if -test.blockprofile is set without this flag, all blocking events
+       are recorded, equivalent to -test.blockprofilerate=1.
+   -coverprofile cover.out
+       Write a coverage profile to the file after all tests have passed.
+       Sets -cover.
+   -cpuprofile cpu.out
+       Write a CPU profile to the specified file before exiting.
+       Writes test binary as -c would.
+   -memprofile mem.out
+       Write a memory profile to the file after all tests have passed.
+       Writes test binary as -c would.
+   -memprofilerate n
+       Enable more precise (and expensive) memory profiles by setting
+       runtime.MemProfileRate. See 'go doc runtime.MemProfileRate'.
+       To profile all memory allocations, use -test.memprofilerate=1
+       and pass --alloc_space flag to the pprof tool.
+   -mutexprofile mutex.out
+       Write a mutex contention profile to the specified file
+       when all tests are complete.
+       Writes test binary as -c would.
+   -mutexprofilefraction n
+       Sample 1 in n stack traces of goroutines holding a
+       contended mutex.
+   -outputdir directory
+       Place output files from profiling in the specified directory,
+       by default the directory in which "go test" is running.
+   -trace trace.out
+       Write an execution trace to the specified file before exiting.
Each of these flags is also recognized with an optional 'test.' prefix,
as in -test.v. When invoking the generated test binary (the result of
'go test -c') directly, however, the prefix is mandatory.
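To ground the benchmark-related flags above, a benchmark as small as the one below can be exercised with, for example, 'go test -bench . -benchmem -benchtime 2s'; the package and function names here are made up for illustration and are not part of the diff.

package sketch

import (
	"strings"
	"testing"
)

// BenchmarkJoin allocates on every iteration, so -benchmem reports
// non-zero allocs/op for it.
func BenchmarkJoin(b *testing.B) {
	parts := []string{"a", "b", "c", "d"}
	for i := 0; i < b.N; i++ {
		_ = strings.Join(parts, ",")
	}
}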
@@ -322,7 +334,8 @@ If the last comment in the function starts with "Output:" then the output
is compared exactly against the comment (see examples below). If the last
comment begins with "Unordered output:" then the output is compared to the
comment, however the order of the lines is ignored. An example with no such
-comment, or with no text after "Output:" is compiled but not executed.
+comment is compiled but not executed. An example with no text after
+"Output:" is compiled, executed, and expected to produce no output.
Godoc displays the body of ExampleXXX to demonstrate the use
of the function, constant, or variable XXX. An example of a method M with
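As a small, hypothetical illustration of the "Unordered output:" behaviour described above (placed in a _test.go file; not part of the diff):

package sketch

import "fmt"

// ExamplePrintSet passes even though map iteration order varies,
// because the expected lines are declared as unordered.
func ExamplePrintSet() {
	set := map[string]bool{"red": true, "green": true, "blue": true}
	for k := range set {
		fmt.Println(k)
	}
	// Unordered output:
	// red
	// green
	// blue
}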
@@ -381,9 +394,9 @@ var (
var testMainDeps = map[string]bool{
	// Dependencies for testmain.
	"testing": true,
-	"regexp":  true,
+	"testing/internal/testdeps": true,
	"os": true,
}
func runTest(cmd *Command, args []string) {
@@ -432,6 +445,11 @@ func runTest(cmd *Command, args []string) {
	testStreamOutput = len(pkgArgs) == 0 || testBench ||
		(testShowPass && (len(pkgs) == 1 || buildP == 1))
+	// For 'go test -i -o x.test', we want to build x.test. Imply -c to make the logic easier.
+	if buildI && testO != "" {
+		testC = true
+	}
	var b builder
	b.init()
@@ -861,7 +879,7 @@ func (b *builder) test(p *Package) (buildAction, runAction, printAction *action,
	if err != nil {
		return nil, nil, nil, err
	}
-	if len(ptest.GoFiles) > 0 {
+	if len(ptest.GoFiles)+len(ptest.CgoFiles) > 0 {
		pmain.imports = append(pmain.imports, ptest)
		t.ImportTest = true
	}
@@ -1089,6 +1107,8 @@ func declareCoverVars(importPath string, files ...string) map[string]*CoverVar {
	return coverVars
}
+var noTestsToRun = []byte("\ntesting: warning: no tests to run\n")
// runTest is the action for running a test binary.
func (b *builder) runTest(a *action) error {
	args := stringList(findExecCmd(), a.deps[0].target, testArgs)
@@ -1179,10 +1199,14 @@ func (b *builder) runTest(a *action) error {
	out := buf.Bytes()
	t := fmt.Sprintf("%.3fs", time.Since(t0).Seconds())
	if err == nil {
+		norun := ""
		if testShowPass {
			a.testOutput.Write(out)
		}
-		fmt.Fprintf(a.testOutput, "ok \t%s\t%s%s\n", a.p.ImportPath, t, coveragePercentage(out))
+		if bytes.HasPrefix(out, noTestsToRun[1:]) || bytes.Contains(out, noTestsToRun) {
+			norun = " [no tests to run]"
+		}
+		fmt.Fprintf(a.testOutput, "ok \t%s\t%s%s%s\n", a.p.ImportPath, t, coveragePercentage(out), norun)
		return nil
	}
@@ -1406,7 +1430,7 @@ func (t *testFuncs) load(filename, pkg string, doImport, seen *bool) error {
		}
	}
	ex := doc.Examples(f)
-	sort.Sort(byOrder(ex))
+	sort.Slice(ex, func(i, j int) bool { return ex[i].Order < ex[j].Order })
	for _, e := range ex {
		*doImport = true // import test file whether executed or not
		if e.Output == "" && !e.EmptyOutput {
@@ -1428,12 +1452,6 @@ func checkTestFunc(fn *ast.FuncDecl, arg string) error {
	return nil
}
-type byOrder []*doc.Example
-func (x byOrder) Len() int           { return len(x) }
-func (x byOrder) Swap(i, j int)      { x[i], x[j] = x[j], x[i] }
-func (x byOrder) Less(i, j int) bool { return x[i].Order < x[j].Order }
var testmainTmpl = template.Must(template.New("main").Parse(`
package main
@@ -1441,8 +1459,8 @@ import (
{{if not .TestMain}}
	"os"
{{end}}
-	"regexp"
	"testing"
+	"testing/internal/testdeps"
{{if .ImportTest}}
	{{if .NeedTest}}_test{{else}}_{{end}} {{.Package.ImportPath | printf "%q"}}
@@ -1477,20 +1495,6 @@ var examples = []testing.InternalExample{
{{end}}
}
-var matchPat string
-var matchRe *regexp.Regexp
-func matchString(pat, str string) (result bool, err error) {
-	if matchRe == nil || matchPat != pat {
-		matchPat = pat
-		matchRe, err = regexp.Compile(matchPat)
-		if err != nil {
-			return
-		}
-	}
-	return matchRe.MatchString(str), nil
-}
{{if .CoverEnabled}}
// Only updated by init functions, so no need for atomicity.
@@ -1539,7 +1543,7 @@ func main() {
		CoveredPackages: {{printf "%q" .Covered}},
	})
{{end}}
-	m := testing.MainStart(matchString, tests, benchmarks, examples)
+	m := testing.MainStart(testdeps.TestDeps{}, tests, benchmarks, examples)
{{with .TestMain}}
	{{.Package}}.{{.Name}}(m)
{{else}}


@ -0,0 +1,3 @@
package a
import _ "c"


@ -0,0 +1 @@
package c


@ -0,0 +1,3 @@
package b
import _ "canonical/a/"


@ -0,0 +1,3 @@
package d
import _ "canonical/b"


@ -0,0 +1,19 @@
package p
/*
void
f(void)
{
}
*/
import "C"
var b bool
func F() {
if b {
for {
}
}
C.f()
}


@ -0,0 +1,10 @@
package p_test
import (
. "cgocover2"
"testing"
)
func TestF(t *testing.T) {
F()
}


@ -0,0 +1,19 @@
package p
/*
void
f(void)
{
}
*/
import "C"
var b bool
func F() {
if b {
for {
}
}
C.f()
}


@ -0,0 +1 @@
package p


@ -0,0 +1,10 @@
package p_test
import (
. "cgocover3"
"testing"
)
func TestF(t *testing.T) {
F()
}


@ -0,0 +1 @@
package p


@ -0,0 +1,19 @@
package p
/*
void
f(void)
{
}
*/
import "C"
var b bool
func F() {
if b {
for {
}
}
C.f()
}


@ -0,0 +1,10 @@
package p_test
import (
. "cgocover4"
"testing"
)
func TestF(t *testing.T) {
F()
}


@ -0,0 +1,8 @@
package main
import (
_ "dupload/p2"
_ "p"
)
func main() {}


@ -0,0 +1 @@
package p


@ -0,0 +1,3 @@
package p2
import _ "dupload/vendor/p"


@ -0,0 +1 @@
package p


@ -0,0 +1 @@
package p


@ -0,0 +1 @@
package p


@ -0,0 +1 @@
package p


@ -0,0 +1 @@
package p


@ -0,0 +1 @@
package p_test


@ -0,0 +1 @@
package p


@ -0,0 +1 @@
package p_test


@ -0,0 +1 @@
package p


@ -0,0 +1 @@
package p


@ -0,0 +1 @@
package p_test


@ -0,0 +1 @@
package p_test


@ -0,0 +1,5 @@
//go:generate echo hello world
package gencycle
import _ "gencycle"


@ -0,0 +1,5 @@
package main
import _ "importmain/test"
func main() {}


@ -0,0 +1 @@
package test


@ -0,0 +1,6 @@
package test_test
import "testing"
import _ "importmain/ismain"
func TestCase(t *testing.T) {}


@ -0,0 +1,7 @@
package main
import "my.pkg"
func main() {
println(pkg.Text)
}


@ -0,0 +1,3 @@
package pkg
var Text = "unset"


@ -0,0 +1,29 @@
package testrace
import "testing"
func TestRace(t *testing.T) {
for i := 0; i < 10; i++ {
c := make(chan int)
x := 1
go func() {
x = 2
c <- 1
}()
x = 3
<-c
}
}
func BenchmarkRace(b *testing.B) {
for i := 0; i < b.N; i++ {
c := make(chan int)
x := 1
go func() {
x = 2
c <- 1
}()
x = 3
<-c
}
}


@ -0,0 +1,6 @@
package standalone_benchmark
import "testing"
func Benchmark(b *testing.B) {
}

Some files were not shown because too many files have changed in this diff.