diff --git a/block/block.go b/block/block.go deleted file mode 100644 index 382ac50..0000000 --- a/block/block.go +++ /dev/null @@ -1,113 +0,0 @@ -package block - -import ( - "blockchain/transaction" - "blockchain/util" - - "crypto/sha256" - "encoding/hex" - "fmt" - "strings" - "sync" - "time" -) - -const difficulty = 1 - -type Block struct { - //header - Version uint32 - Timestamp string - PrevHash []byte - Difficulty uint32 - Nonce uint32 //The solution to the block. - //body - Transactions []transaction.Transaction -} - -//Metadata about the chain. -type Metadata struct { - B []string -} - -var Blockchain []Block -var height uint32 // height in chain. - -var mutex = &sync.Mutex{} - -func IsBlockValid(newBlock, oldBlock Block) bool { - //if oldBlock.Index+1 != newBlock.Index { - // return false - //} - - //oldBlockHash := calculateHash(oldBlock) - //if oldBlockHash != newBlock.PrevHash { - // return false - // } - - //if calculateHash(newBlock) != newBlock.Hash { - // return false - // } - - return true -} - -func isHashValid(hash []byte, difficulty uint32) bool { - prefix := strings.Repeat("0", int(difficulty)) - //fmt.Println(string(hash[:])) - return strings.HasPrefix(string(hash[:]), prefix) -} - -func GenerateBlock(oldBlock Block, tx []transaction.Transaction) Block { - var newBlock Block - - t := time.Now() - - newBlock.Timestamp = t.String() - newBlock.Transactions = tx - - // generate block hash of old block header - oldBlockHash := calculateHash(oldBlock) - newBlock.PrevHash = oldBlockHash - - newBlock.Difficulty = difficulty - - for i := 0; ; i++ { - // increase nonce until hash is valid. 
- newBlock.Nonce = uint32(i) - if !isHashValid(calculateHash(newBlock), newBlock.Difficulty) { - //fmt.Println(calculateHash(newBlock), " do more work!") - h := calculateHash(newBlock) - fmt.Println(hex.EncodeToString(h) + " do more work!") - time.Sleep(time.Second) - continue - } else { - h := calculateHash(newBlock) - fmt.Println(hex.EncodeToString(h) + " work done!") - break - } - - } - //fmt.Println(newBlock) - return newBlock -} - -//calculates the block header sha256 hash. -func calculateHash(block Block) []byte { - bVersion := util.Uinttobyte(block.Version) - bNonce := util.Uinttobyte(block.Nonce) - bDifficulty := util.Uinttobyte(block.Difficulty) - - record := []byte{} - record = append(record, bVersion[:]...) - record = append(record, block.PrevHash[:]...) - record = append(record, bNonce[:]...) - record = append(record, []byte(block.Timestamp)[:]...) - record = append(record, bDifficulty[:]...) - - h := sha256.New() - h.Write([]byte(record)) - hashed := h.Sum(nil) - //fmt.Println(hex.EncodeToString(hashed)) - return hashed -} diff --git a/blockchain.conf b/blockchain.conf new file mode 100644 index 0000000..5647ad6 --- /dev/null +++ b/blockchain.conf @@ -0,0 +1,345 @@ +[Application Options] + +; ------------------------------------------------------------------------------ +; Data settings +; ------------------------------------------------------------------------------ + +; The directory to store data such as the block chain and peer addresses. The +; block chain takes several GB, so this location must have a lot of free space. +; The default is ~/.btcd/data on POSIX OSes, $LOCALAPPDATA/Btcd/data on Windows, +; ~/Library/Application Support/Btcd/data on Mac OS, and $home/btcd/data on +; Plan9. Environment variables are expanded so they may be used. NOTE: Windows +; environment variables are typically %VARIABLE%, but they must be accessed with +; $VARIABLE here. Also, ~ is expanded to $LOCALAPPDATA on Windows. 
+; datadir=~/.btcd/data + + +; ------------------------------------------------------------------------------ +; Network settings +; ------------------------------------------------------------------------------ + +; Use testnet. +; testnet=1 + +; Connect via a SOCKS5 proxy. NOTE: Specifying a proxy will disable listening +; for incoming connections unless listen addresses are provided via the 'listen' +; option. +; proxy=127.0.0.1:9050 +; proxyuser= +; proxypass= + +; The SOCKS5 proxy above is assumed to be Tor (https://www.torproject.org). +; If the proxy is not tor the following may be used to prevent using tor +; specific SOCKS queries to lookup addresses (this increases anonymity when tor +; is used by preventing your IP being leaked via DNS). +; noonion=1 + +; Use an alternative proxy to connect to .onion addresses. The proxy is assumed +; to be a Tor node. Non .onion addresses will be contacted with the main proxy +; or without a proxy if none is set. +; onion=127.0.0.1:9051 +; onionuser= +; onionpass= + +; Enable Tor stream isolation by randomizing proxy user credentials resulting in +; Tor creating a new circuit for each connection. This makes it more difficult +; to correlate connections. +; torisolation=1 + +; Use Universal Plug and Play (UPnP) to automatically open the listen port +; and obtain the external IP address from supported devices. NOTE: This option +; will have no effect if exernal IP addresses are specified. +; upnp=1 + +; Specify the external IP addresses your node is listening on. One address per +; line. btcd will not contact 3rd-party sites to obtain external ip addresses. +; This means if you are behind NAT, your node will not be able to advertise a +; reachable address unless you specify it here or enable the 'upnp' option (and +; have a supported device). +; externalip=1.2.3.4 +; externalip=2002::1234 + +; ****************************************************************************** +; Summary of 'addpeer' versus 'connect'. 
+; +; Only one of the following two options, 'addpeer' and 'connect', may be +; specified. Both allow you to specify peers that you want to stay connected +; with, but the behavior is slightly different. By default, btcd will query DNS +; to find peers to connect to, so unless you have a specific reason such as +; those described below, you probably won't need to modify anything here. +; +; 'addpeer' does not prevent connections to other peers discovered from +; the peers you are connected to and also lets the remote peers know you are +; available so they can notify other peers they can to connect to you. This +; option might be useful if you are having problems finding a node for some +; reason (perhaps due to a firewall). +; +; 'connect', on the other hand, will ONLY connect to the specified peers and +; no others. It also disables listening (unless you explicitly set listen +; addresses via the 'listen' option) and DNS seeding, so you will not be +; advertised as an available peer to the peers you connect to and won't accept +; connections from any other peers. So, the 'connect' option effectively allows +; you to only connect to "trusted" peers. +; ****************************************************************************** + +; Add persistent peers to connect to as desired. One peer per line. +; You may specify each IP address with or without a port. The default port will +; be addd automatically if one is not specified here. +; addpeer=192.168.1.1 +; addpeer=10.0.0.2:8333 +; addpeer=fe80::1 +; addpeer=[fe80::2]:8333 + +; Add persistent peers that you ONLY want to connect to as desired. One peer +; per line. You may specify each IP address with or without a port. The +; default port will be added automatically if one is not specified here. +; NOTE: Specifying this option has other side effects as described above in +; the 'addpeer' versus 'connect' summary section. 
+; connect=192.168.1.1 +; connect=10.0.0.2:8333 +; connect=fe80::1 +; connect=[fe80::2]:8333 + +; Maximum number of inbound and outbound peers. +; maxpeers=125 + +; Disable banning of misbehaving peers. +; nobanning=1 + +; Maximum allowed ban score before disconnecting and banning misbehaving peers. +; banthreshold=100 + +; How long to ban misbehaving peers. Valid time units are {s, m, h}. +; Minimum 1s. +; banduration=24h +; banduration=11h30m15s + +; Add whitelisted IP networks and IPs. Connected peers whose IP matches a +; whitelist will not have their ban score increased. +; whitelist=127.0.0.1 +; whitelist=::1 +; whitelist=192.168.0.0/24 +; whitelist=fd00::/16 + +; Disable DNS seeding for peers. By default, when btcd starts, it will use +; DNS to query for available peers to connect with. +; nodnsseed=1 + +; Specify the interfaces to listen on. One listen address per line. +; NOTE: The default port is modified by some options such as 'testnet', so it is +; recommended to not specify a port and allow a proper default to be chosen +; unless you have a specific reason to do otherwise. +; All interfaces on default port (this is the default): +; listen= +; All ipv4 interfaces on default port: +; listen=0.0.0.0 +; All ipv6 interfaces on default port: +; listen=:: +; All interfaces on port 8333: +; listen=:8333 +; All ipv4 interfaces on port 8333: +; listen=0.0.0.0:8333 +; All ipv6 interfaces on port 8333: +; listen=[::]:8333 +; Only ipv4 localhost on port 8333: +; listen=127.0.0.1:8333 +; Only ipv6 localhost on port 8333: +; listen=[::1]:8333 +; Only ipv4 localhost on non-standard port 8336: +; listen=127.0.0.1:8336 +; All interfaces on non-standard port 8336: +; listen=:8336 +; All ipv4 interfaces on non-standard port 8336: +; listen=0.0.0.0:8336 +; All ipv6 interfaces on non-standard port 8336: +; listen=[::]:8336 + +; Disable listening for incoming connections. This will override all listeners. +; nolisten=1 + +; Disable peer bloom filtering. See BIP0111. 
+; nopeerbloomfilters=1 + +; Add additional checkpoints. Format: ':' +; addcheckpoint=: + +; Add comments to the user agent that is advertised to peers. +; Must not include characters '/', ':', '(' and ')'. +; uacomment= + +; Disable committed peer filtering (CF). +; nocfilters=1 + +; ------------------------------------------------------------------------------ +; RPC server options - The following options control the built-in RPC server +; which is used to control and query information from a running btcd process. +; +; NOTE: The RPC server is disabled by default if rpcuser AND rpcpass, or +; rpclimituser AND rpclimitpass, are not specified. +; ------------------------------------------------------------------------------ + +; Secure the RPC API by specifying the username and password. You can also +; specify a limited username and password. You must specify at least one +; full set of credentials - limited or admin - or the RPC server will +; be disabled. +; rpcuser=whatever_admin_username_you_want +; rpcpass= +; rpclimituser=whatever_limited_username_you_want +; rpclimitpass= + +; Specify the interfaces for the RPC server listen on. One listen address per +; line. NOTE: The default port is modified by some options such as 'testnet', +; so it is recommended to not specify a port and allow a proper default to be +; chosen unless you have a specific reason to do otherwise. By default, the +; RPC server will only listen on localhost for IPv4 and IPv6. 
+; All interfaces on default port: +; rpclisten= +; All ipv4 interfaces on default port: +; rpclisten=0.0.0.0 +; All ipv6 interfaces on default port: +; rpclisten=:: +; All interfaces on port 8334: +; rpclisten=:8334 +; All ipv4 interfaces on port 8334: +; rpclisten=0.0.0.0:8334 +; All ipv6 interfaces on port 8334: +; rpclisten=[::]:8334 +; Only ipv4 localhost on port 8334: +; rpclisten=127.0.0.1:8334 +; Only ipv6 localhost on port 8334: +; rpclisten=[::1]:8334 +; Only ipv4 localhost on non-standard port 8337: +; rpclisten=127.0.0.1:8337 +; All interfaces on non-standard port 8337: +; rpclisten=:8337 +; All ipv4 interfaces on non-standard port 8337: +; rpclisten=0.0.0.0:8337 +; All ipv6 interfaces on non-standard port 8337: +; rpclisten=[::]:8337 + +; Specify the maximum number of concurrent RPC clients for standard connections. +; rpcmaxclients=10 + +; Specify the maximum number of concurrent RPC websocket clients. +; rpcmaxwebsockets=25 + +; Mirror some JSON-RPC quirks of Bitcoin Core -- NOTE: Discouraged unless +; interoperability issues need to be worked around +; rpcquirks=1 + +; Use the following setting to disable the RPC server even if the rpcuser and +; rpcpass are specified above. This allows one to quickly disable the RPC +; server without having to remove credentials from the config file. +; norpc=1 + +; Use the following setting to disable TLS for the RPC server. NOTE: This +; option only works if the RPC server is bound to localhost interfaces (which is +; the default). +; notls=1 + + +; ------------------------------------------------------------------------------ +; Mempool Settings - The following options +; ------------------------------------------------------------------------------ + +; Set the minimum transaction fee to be considered a non-zero fee, +; minrelaytxfee=0.00001 + +; Rate-limit free transactions to the value 15 * 1000 bytes per +; minute. +; limitfreerelay=15 + +; Require high priority for relaying free or low-fee transactions. 
+; norelaypriority=0 + +; Limit orphan transaction pool to 100 transactions. +; maxorphantx=100 + +; Do not accept transactions from remote peers. +; blocksonly=1 + +; Relay non-standard transactions regardless of default network settings. +; relaynonstd=1 + +; Reject non-standard transactions regardless of default network settings. +; rejectnonstd=1 + + +; ------------------------------------------------------------------------------ +; Optional Indexes +; ------------------------------------------------------------------------------ + +; Build and maintain a full hash-based transaction index which makes all +; transactions available via the getrawtransaction RPC. +; txindex=1 + +; Build and maintain a full address-based transaction index which makes the +; searchrawtransactions RPC available. +; addrindex=1 + +; Delete the entire address index on start up, then exit. +; dropaddrindex=0 + + +; ------------------------------------------------------------------------------ +; Signature Verification Cache +; ------------------------------------------------------------------------------ + +; Limit the signature cache to a max of 50000 entries. +; sigcachemaxsize=50000 + + +; ------------------------------------------------------------------------------ +; Coin Generation (Mining) Settings - The following options control the +; generation of block templates used by external mining applications through RPC +; calls as well as the built-in CPU miner (if enabled). +; ------------------------------------------------------------------------------ + +; Enable mining. +; +; NOTE: This is typically only useful for testing purposes such as testnet or +; simnet since the difficulty on mainnet is far too high for CPU mining to be +; worth your while. +generate=false + +; Add addresses to pay mined blocks to for CPU mining and potentially in the +; block templates generated for the getblocktemplate RPC. One address per line. 
+; miningaddr=1yourbitcoinaddress +; miningaddr=1yourbitcoinaddress2 +; miningaddr=1yourbitcoinaddress3 + +; Specify the minimum block size in bytes to create. By default, only +; transactions which have enough fees or a high enough priority will be included +; in generated block templates. Specifying a minimum block size will instead +; attempt to fill generated block templates up with transactions until it is at +; least the specified number of bytes. +; blockminsize=0 + +; Specify the maximum block size in bytes to create. This value will be limited +; to the consensus limit if it is larger than that value. +; blockmaxsize=750000 + +; Specify the size in bytes of the high-priority/low-fee area when creating a +; block. Transactions which consist of large amounts, old inputs, and small +; sizes have the highest priority. One consequence of this is that as low-fee +; or free transactions age, they raise in priority thereby making them more +; likely to be included in this section of a new block. This value is limited +; by the blockmaxsize option and will be limited as needed. +; blockprioritysize=50000 + + +; ------------------------------------------------------------------------------ +; Debug +; ------------------------------------------------------------------------------ + +; Debug logging level. +; Valid levels are {trace, debug, info, warn, error, critical} +; You may also specify =,=,... to set +; log level for individual subsystems. Use btcd --debuglevel=show to list +; available subsystems. +; debuglevel=info + +; The port used to listen for HTTP profile requests. The profile server will +; be disabled if this option is not specified. The profile information can be +; accessed at http://localhost:/debug/pprof once running. 
+; profile=6061 diff --git a/blockchain.go b/blockchain.go deleted file mode 100644 index 7e58528..0000000 --- a/blockchain.go +++ /dev/null @@ -1,30 +0,0 @@ -package main - -import ( - "blockchain/block" - "blockchain/web" - - "log" - "time" - - "github.com/davecgh/go-spew/spew" - "sync" -) - -//hashGenesisBlock := "0x000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f" - -func main() { - mutex := &sync.Mutex{} - - go func() { - t := time.Now() - genesisBlock := block.Block{} - genesisBlock = block.Block{Version: 0, Timestamp: t.String(), Difficulty: 1, Nonce: 0} - spew.Dump(genesisBlock) - - mutex.Lock() - block.Blockchain = append(block.Blockchain, genesisBlock) - mutex.Unlock() - }() - log.Fatal(web.Run()) -} diff --git a/config/blockchain.yml b/blockchain.yml similarity index 63% rename from config/blockchain.yml rename to blockchain.yml index 04f4b26..5db03de 100644 --- a/config/blockchain.yml +++ b/blockchain.yml @@ -1,3 +1,5 @@ seed_nodes: - 127.0.0.1:8000 - 127.0.0.1:8001 +listener: + - 127.0.0.1:8000 diff --git a/blocklog/blocklog.go b/blocklog/blocklog.go new file mode 100644 index 0000000..da1c6d7 --- /dev/null +++ b/blocklog/blocklog.go @@ -0,0 +1,480 @@ +// Copyright (c) 2017 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. +// +// Copyright (c) 2009 The Go Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package blocklog + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "runtime" + "strings" + "sync" + "sync/atomic" + "time" +) + +// defaultFlags specifies changes to the default logger behavior. It is set +// during package init and configured using the LOGFLAGS environment variable. +// New logger backends can override these default flags using WithFlags. +var defaultFlags uint32 + +// Flags to modify Backend's behavior. +const ( + // Llongfile modifies the logger output to include full path and line number + // of the logging callsite, e.g. /a/b/c/main.go:123. + Llongfile uint32 = 1 << iota + + // Lshortfile modifies the logger output to include filename and line number + // of the logging callsite, e.g. main.go:123. Overrides Llongfile. + Lshortfile +) + +// Read logger flags from the LOGFLAGS environment variable. Multiple flags can +// be set at once, separated by commas. 
+func init() { + for _, f := range strings.Split(os.Getenv("LOGFLAGS"), ",") { + switch f { + case "longfile": + defaultFlags |= Llongfile + case "shortfile": + defaultFlags |= Lshortfile + } + } +} + +// Level is the level at which a logger is configured. All messages sent +// to a level which is below the current level are filtered. +type Level uint32 + +// Level constants. +const ( + LevelTrace Level = iota + LevelDebug + LevelInfo + LevelWarn + LevelError + LevelCritical + LevelOff +) + +// levelStrs defines the human-readable names for each logging level. +var levelStrs = [...]string{"TRC", "DBG", "INF", "WRN", "ERR", "CRT", "OFF"} + +// LevelFromString returns a level based on the input string s. If the input +// can't be interpreted as a valid log level, the info level and false is +// returned. +func LevelFromString(s string) (l Level, ok bool) { + switch strings.ToLower(s) { + case "trace", "trc": + return LevelTrace, true + case "debug", "dbg": + return LevelDebug, true + case "info", "inf": + return LevelInfo, true + case "warn", "wrn": + return LevelWarn, true + case "error", "err": + return LevelError, true + case "critical", "crt": + return LevelCritical, true + case "off": + return LevelOff, true + default: + return LevelInfo, false + } +} + +// String returns the tag of the logger used in log messages, or "OFF" if +// the level will not produce any log output. +func (l Level) String() string { + if l >= LevelOff { + return "OFF" + } + return levelStrs[l] +} + +// NewBackend creates a logger backend from a Writer. +func NewBackend(w io.Writer, opts ...BackendOption) *Backend { + b := &Backend{w: w, flag: defaultFlags} + for _, o := range opts { + o(b) + } + return b +} + +// Backend is a logging backend. Subsystems created from the backend write to +// the backend's Writer. Backend provides atomic writes to the Writer from all +// subsystems. 
+type Backend struct { + w io.Writer + mu sync.Mutex // ensures atomic writes + flag uint32 +} + +// BackendOption is a function used to modify the behavior of a Backend. +type BackendOption func(b *Backend) + +// WithFlags configures a Backend to use the specified flags rather than using +// the package's defaults as determined through the LOGFLAGS environment +// variable. +func WithFlags(flags uint32) BackendOption { + return func(b *Backend) { + b.flag = flags + } +} + +// bufferPool defines a concurrent safe free list of byte slices used to provide +// temporary buffers for formatting log messages prior to outputting them. +var bufferPool = sync.Pool{ + New: func() interface{} { + b := make([]byte, 0, 120) + return &b // pointer to slice to avoid boxing alloc + }, +} + +// buffer returns a byte slice from the free list. A new buffer is allocated if +// there are not any available on the free list. The returned byte slice should +// be returned to the fee list by using the recycleBuffer function when the +// caller is done with it. +func buffer() *[]byte { + return bufferPool.Get().(*[]byte) +} + +// recycleBuffer puts the provided byte slice, which should have been obtain via +// the buffer function, back on the free list. +func recycleBuffer(b *[]byte) { + *b = (*b)[:0] + bufferPool.Put(b) +} + +// From stdlib log package. +// Cheap integer to fixed-width decimal ASCII. Give a negative width to avoid +// zero-padding. +func itoa(buf *[]byte, i int, wid int) { + // Assemble decimal in reverse order. + var b [20]byte + bp := len(b) - 1 + for i >= 10 || wid > 1 { + wid-- + q := i / 10 + b[bp] = byte('0' + i - q*10) + bp-- + i = q + } + // i < 10 + b[bp] = byte('0' + i) + *buf = append(*buf, b[bp:]...) +} + +// Appends a header in the default format 'YYYY-MM-DD hh:mm:ss.sss [LVL] TAG: '. +// If either of the Lshortfile or Llongfile flags are specified, the file named +// and line number are included after the tag and before the final colon. 
+func formatHeader(buf *[]byte, t time.Time, lvl, tag string, file string, line int) { + year, month, day := t.Date() + hour, min, sec := t.Clock() + ms := t.Nanosecond() / 1e6 + + itoa(buf, year, 4) + *buf = append(*buf, '-') + itoa(buf, int(month), 2) + *buf = append(*buf, '-') + itoa(buf, day, 2) + *buf = append(*buf, ' ') + itoa(buf, hour, 2) + *buf = append(*buf, ':') + itoa(buf, min, 2) + *buf = append(*buf, ':') + itoa(buf, sec, 2) + *buf = append(*buf, '.') + itoa(buf, ms, 3) + *buf = append(*buf, " ["...) + *buf = append(*buf, lvl...) + *buf = append(*buf, "] "...) + *buf = append(*buf, tag...) + if file != "" { + *buf = append(*buf, ' ') + *buf = append(*buf, file...) + *buf = append(*buf, ':') + itoa(buf, line, -1) + } + *buf = append(*buf, ": "...) +} + +// calldepth is the call depth of the callsite function relative to the +// caller of the subsystem logger. It is used to recover the filename and line +// number of the logging call if either the short or long file flags are +// specified. +const calldepth = 3 + +// callsite returns the file name and line number of the callsite to the +// subsystem logger. +func callsite(flag uint32) (string, int) { + _, file, line, ok := runtime.Caller(calldepth) + if !ok { + return "???", 0 + } + if flag&Lshortfile != 0 { + short := file + for i := len(file) - 1; i > 0; i-- { + if os.IsPathSeparator(file[i]) { + short = file[i+1:] + break + } + } + file = short + } + return file, line +} + +// print outputs a log message to the writer associated with the backend after +// creating a prefix for the given level and tag according to the formatHeader +// function and formatting the provided arguments using the default formatting +// rules. 
+func (b *Backend) print(lvl, tag string, args ...interface{}) { + t := time.Now() // get as early as possible + + bytebuf := buffer() + + var file string + var line int + if b.flag&(Lshortfile|Llongfile) != 0 { + file, line = callsite(b.flag) + } + + formatHeader(bytebuf, t, lvl, tag, file, line) + buf := bytes.NewBuffer(*bytebuf) + fmt.Fprintln(buf, args...) + *bytebuf = buf.Bytes() + + b.mu.Lock() + b.w.Write(*bytebuf) + b.mu.Unlock() + + recycleBuffer(bytebuf) +} + +// printf outputs a log message to the writer associated with the backend after +// creating a prefix for the given level and tag according to the formatHeader +// function and formatting the provided arguments according to the given format +// specifier. +func (b *Backend) printf(lvl, tag string, format string, args ...interface{}) { + t := time.Now() // get as early as possible + + bytebuf := buffer() + + var file string + var line int + if b.flag&(Lshortfile|Llongfile) != 0 { + file, line = callsite(b.flag) + } + + formatHeader(bytebuf, t, lvl, tag, file, line) + buf := bytes.NewBuffer(*bytebuf) + fmt.Fprintf(buf, format, args...) + *bytebuf = append(buf.Bytes(), '\n') + + b.mu.Lock() + b.w.Write(*bytebuf) + b.mu.Unlock() + + recycleBuffer(bytebuf) +} + +// Logger returns a new logger for a particular subsystem that writes to the +// Backend b. A tag describes the subsystem and is included in all log +// messages. The logger uses the info verbosity level by default. +func (b *Backend) Logger(subsystemTag string) Logger { + return &slog{LevelInfo, subsystemTag, b} +} + +// slog is a subsystem logger for a Backend. Implements the Logger interface. +type slog struct { + lvl Level // atomic + tag string + b *Backend +} + +// Trace formats message using the default formats for its operands, prepends +// the prefix as necessary, and writes to log with LevelTrace. +// +// This is part of the Logger interface implementation. 
+func (l *slog) Trace(args ...interface{}) { + lvl := l.Level() + if lvl <= LevelTrace { + l.b.print("TRC", l.tag, args...) + } +} + +// Tracef formats message according to format specifier, prepends the prefix as +// necessary, and writes to log with LevelTrace. +// +// This is part of the Logger interface implementation. +func (l *slog) Tracef(format string, args ...interface{}) { + lvl := l.Level() + if lvl <= LevelTrace { + l.b.printf("TRC", l.tag, format, args...) + } +} + +// Debug formats message using the default formats for its operands, prepends +// the prefix as necessary, and writes to log with LevelDebug. +// +// This is part of the Logger interface implementation. +func (l *slog) Debug(args ...interface{}) { + lvl := l.Level() + if lvl <= LevelDebug { + l.b.print("DBG", l.tag, args...) + } +} + +// Debugf formats message according to format specifier, prepends the prefix as +// necessary, and writes to log with LevelDebug. +// +// This is part of the Logger interface implementation. +func (l *slog) Debugf(format string, args ...interface{}) { + lvl := l.Level() + if lvl <= LevelDebug { + l.b.printf("DBG", l.tag, format, args...) + } +} + +// Info formats message using the default formats for its operands, prepends +// the prefix as necessary, and writes to log with LevelInfo. +// +// This is part of the Logger interface implementation. +func (l *slog) Info(args ...interface{}) { + lvl := l.Level() + if lvl <= LevelInfo { + l.b.print("INF", l.tag, args...) + } +} + +// Infof formats message according to format specifier, prepends the prefix as +// necessary, and writes to log with LevelInfo. +// +// This is part of the Logger interface implementation. +func (l *slog) Infof(format string, args ...interface{}) { + lvl := l.Level() + if lvl <= LevelInfo { + l.b.printf("INF", l.tag, format, args...) + } +} + +// Warn formats message using the default formats for its operands, prepends +// the prefix as necessary, and writes to log with LevelWarn. 
+// +// This is part of the Logger interface implementation. +func (l *slog) Warn(args ...interface{}) { + lvl := l.Level() + if lvl <= LevelWarn { + l.b.print("WRN", l.tag, args...) + } +} + +// Warnf formats message according to format specifier, prepends the prefix as +// necessary, and writes to log with LevelWarn. +// +// This is part of the Logger interface implementation. +func (l *slog) Warnf(format string, args ...interface{}) { + lvl := l.Level() + if lvl <= LevelWarn { + l.b.printf("WRN", l.tag, format, args...) + } +} + +// Error formats message using the default formats for its operands, prepends +// the prefix as necessary, and writes to log with LevelError. +// +// This is part of the Logger interface implementation. +func (l *slog) Error(args ...interface{}) { + lvl := l.Level() + if lvl <= LevelError { + l.b.print("ERR", l.tag, args...) + } +} + +// Errorf formats message according to format specifier, prepends the prefix as +// necessary, and writes to log with LevelError. +// +// This is part of the Logger interface implementation. +func (l *slog) Errorf(format string, args ...interface{}) { + lvl := l.Level() + if lvl <= LevelError { + l.b.printf("ERR", l.tag, format, args...) + } +} + +// Critical formats message using the default formats for its operands, prepends +// the prefix as necessary, and writes to log with LevelCritical. +// +// This is part of the Logger interface implementation. +func (l *slog) Critical(args ...interface{}) { + lvl := l.Level() + if lvl <= LevelCritical { + l.b.print("CRT", l.tag, args...) + } +} + +// Criticalf formats message according to format specifier, prepends the prefix +// as necessary, and writes to log with LevelCritical. +// +// This is part of the Logger interface implementation. +func (l *slog) Criticalf(format string, args ...interface{}) { + lvl := l.Level() + if lvl <= LevelCritical { + l.b.printf("CRT", l.tag, format, args...) 
+ } +} + +// Level returns the current logging level +// +// This is part of the Logger interface implementation. +func (l *slog) Level() Level { + return Level(atomic.LoadUint32((*uint32)(&l.lvl))) +} + +// SetLevel changes the logging level to the passed level. +// +// This is part of the Logger interface implementation. +func (l *slog) SetLevel(level Level) { + atomic.StoreUint32((*uint32)(&l.lvl), uint32(level)) +} + +// Disabled is a Logger that will never output anything. +var Disabled Logger + +func init() { + Disabled = &slog{lvl: LevelOff, b: NewBackend(ioutil.Discard)} +} diff --git a/blocklog/interface.go b/blocklog/interface.go new file mode 100644 index 0000000..7179ef3 --- /dev/null +++ b/blocklog/interface.go @@ -0,0 +1,60 @@ +package blocklog + +// Logger is an interface which describes a level-based logger. A default +// implementation of Logger is implemented by this package and can be created +// by calling (*Backend).Logger. +type Logger interface { + // Tracef formats message according to format specifier and writes to + // to log with LevelTrace. + Tracef(format string, params ...interface{}) + + // Debugf formats message according to format specifier and writes to + // log with LevelDebug. + Debugf(format string, params ...interface{}) + + // Infof formats message according to format specifier and writes to + // log with LevelInfo. + Infof(format string, params ...interface{}) + + // Warnf formats message according to format specifier and writes to + // to log with LevelWarn. + Warnf(format string, params ...interface{}) + + // Errorf formats message according to format specifier and writes to + // to log with LevelError. + Errorf(format string, params ...interface{}) + + // Criticalf formats message according to format specifier and writes to + // log with LevelCritical. + Criticalf(format string, params ...interface{}) + + // Trace formats message using the default formats for its operands + // and writes to log with LevelTrace. 
+ Trace(v ...interface{}) + + // Debug formats message using the default formats for its operands + // and writes to log with LevelDebug. + Debug(v ...interface{}) + + // Info formats message using the default formats for its operands + // and writes to log with LevelInfo. + Info(v ...interface{}) + + // Warn formats message using the default formats for its operands + // and writes to log with LevelWarn. + Warn(v ...interface{}) + + // Error formats message using the default formats for its operands + // and writes to log with LevelError. + Error(v ...interface{}) + + // Critical formats message using the default formats for its operands + // and writes to log with LevelCritical. + Critical(v ...interface{}) + + // Level returns the current logging level. + Level() Level + + // SetLevel changes the logging level to the passed level. + SetLevel(level Level) +} diff --git a/config/.gitignore b/chaincfg/.gitignore similarity index 100% rename from config/.gitignore rename to chaincfg/.gitignore diff --git a/chaincfg/chaincfg.go b/chaincfg/chaincfg.go new file mode 100644 index 0000000..1899efa --- /dev/null +++ b/chaincfg/chaincfg.go @@ -0,0 +1,153 @@ +package chaincfg + +import ( + "math/big" + "time" +) + +// DNSSeed identifies a DNS seed. +type DNSSeed struct { + // Host defines the hostname of the seed. + Host string + + // HasFiltering defines whether the seed supports filtering + // by service flags (wire.ServiceFlag). + HasFiltering bool +} + +// Params defines a Bitcoin network by its parameters. These parameters may be +// used by Bitcoin applications to differentiate networks as well as addresses +// and keys for one network from those intended for use on another network. +type Params struct { + // Name defines a human-readable identifier for the network. + Name string + + // Net defines the magic bytes used to identify the network. + // Net wire.BitcoinNet + + // DefaultPort defines the default peer-to-peer port for the network. 
+ DefaultPort string + + // DNSSeeds defines a list of DNS seeds for the network that are used + // as one method to discover peers. + DNSSeeds []DNSSeed + + // GenesisBlock defines the first block of the chain. + //GenesisBlock *blockchain.Block + + // GenesisHash is the starting block hash. + //GenesisHash *chainhash.Hash + + // PowLimit defines the highest allowed proof of work value for a block + // as a uint256. + PowLimit *big.Int + + // PowLimitBits defines the highest allowed proof of work value for a + // block in compact form. + PowLimitBits uint32 + + // CoinbaseMaturity is the number of blocks required before newly mined + // coins (coinbase transactions) can be spent. + CoinbaseMaturity uint16 + + // SubsidyReductionInterval is the interval of blocks before the subsidy + // is reduced. + SubsidyReductionInterval int32 + + // TargetTimespan is the desired amount of time that should elapse + // before the block difficulty requirement is examined to determine how + // it should be changed in order to maintain the desired block + // generation rate. + TargetTimespan time.Duration + + // TargetTimePerBlock is the desired amount of time to generate each + // block. + TargetTimePerBlock time.Duration + + // RetargetAdjustmentFactor is the adjustment factor used to limit + // the minimum and maximum amount of adjustment that can occur between + // difficulty retargets. + RetargetAdjustmentFactor int64 + + // ReduceMinDifficulty defines whether the network should reduce the + // minimum required difficulty after a long enough period of time has + // passed without finding a block. This is really only useful for test + // networks and should not be set on a main network. + ReduceMinDifficulty bool + + // MinDiffReductionTime is the amount of time after which the minimum + // required difficulty should be reduced when a block hasn't been found. + // + // NOTE: This only applies if ReduceMinDifficulty is true. 
+ MinDiffReductionTime time.Duration + + // Checkpoints ordered from oldest to newest. + // Checkpoints []Checkpoint + + // Mempool parameters + RelayNonStdTxs bool + + // Address encoding magics + //PubKeyHashAddrID byte // First byte of a P2PKH address + //ScriptHashAddrID byte // First byte of a P2SH address + //PrivateKeyID byte // First byte of a WIF private key + //WitnessPubKeyHashAddrID byte // First byte of a P2WPKH address + //WitnessScriptHashAddrID byte // First byte of a P2WSH address + + // BIP32 hierarchical deterministic extended key magics + //HDPrivateKeyID [4]byte + //HDPublicKeyID [4]byte + + // BIP44 coin type used in the hierarchical deterministic path for + // address generation. + //HDCoinType uint32 +} + +// MainNetParams defines the network parameters for the main Bitcoin network. +var MainNetParams = Params{ + Name: "mainnet", + //Net: wire.MainNet, + DefaultPort: "8333", + DNSSeeds: []DNSSeed{ + {"127.0.0.1", true}, + }, + + // Chain parameters + //GenesisBlock: &genesisBlock, + //GenesisHash: &genesisHash, + //PowLimit: mainPowLimit, + PowLimitBits: 0x1d00ffff, + //BIP0034Height: 227931, // 000000000000024b89b42a942fe0d9fea3bb44ab7bd1b19115dd6a759c0808b8 + //BIP0065Height: 388381, // 000000000000000004c2b624ed5d7756c508d90fd0da2c7c679febfa6c4735f0 + //BIP0066Height: 363725, // 00000000000000000379eaa19dce8c9b722d46ae6a57c2f1a988119488b50931 + CoinbaseMaturity: 100, + SubsidyReductionInterval: 210000, + TargetTimespan: time.Hour * 24 * 14, // 14 days + TargetTimePerBlock: time.Minute * 10, // 10 minutes + RetargetAdjustmentFactor: 4, // 25% less, 400% more + ReduceMinDifficulty: false, + MinDiffReductionTime: 0, + // GenerateSupported: false, + + // Mempool parameters + RelayNonStdTxs: false, + + // Human-readable part for Bech32 encoded segwit addresses, as defined in + // BIP 173. 
+ //Bech32HRPSegwit: "bc", // always bc for main net + + // Address encoding magics + //PubKeyHashAddrID: 0x00, // starts with 1 + //ScriptHashAddrID: 0x05, // starts with 3 + //PrivateKeyID: 0x80, // starts with 5 (uncompressed) or K (compressed) + //WitnessPubKeyHashAddrID: 0x06, // starts with p2 + //WitnessScriptHashAddrID: 0x0A, // starts with 7Xh + + // BIP32 hierarchical deterministic extended key magics + //HDPrivateKeyID: [4]byte{0x04, 0x88, 0xad, 0xe4}, // starts with xprv + //HDPublicKeyID: [4]byte{0x04, 0x88, 0xb2, 0x1e}, // starts with xpub + + // BIP44 coin type used in the hierarchical deterministic path for + // address generation. + //HDCoinType: 0, +} diff --git a/chaincfg/genesis.go b/chaincfg/genesis.go new file mode 100644 index 0000000..0b1924a --- /dev/null +++ b/chaincfg/genesis.go @@ -0,0 +1,29 @@ +// Copyright (c) 2014-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package chaincfg + +//"github.com/btcsuite/btcd/chaincfg/chainhash" +//"github.com/btcsuite/btcd/wire" + +// genesisHash is the hash of the first block in the block chain for the main +// network (genesis block). +/*var genesisHash = chainhash.Hash([chainhash.HashSize]byte{ // Make go vet happy. + 0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, + 0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f, + 0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c, + 0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, +}) + +// genesisMerkleRoot is the hash of the first transaction in the genesis block +// for the main network. +var genesisMerkleRoot = chainhash.Hash([chainhash.HashSize]byte{ // Make go vet happy. 
+ 0x3b, 0xa3, 0xed, 0xfd, 0x7a, 0x7b, 0x12, 0xb2, + 0x7a, 0xc7, 0x2c, 0x3e, 0x67, 0x76, 0x8f, 0x61, + 0x7f, 0xc8, 0x1b, 0xc3, 0x88, 0x8a, 0x51, 0x32, + 0x3a, 0x9f, 0xb8, 0xaa, 0x4b, 0x1e, 0x5e, 0x4a, +})*/ + +// genesisBlock defines the genesis block of the block chain which serves as the +// public transaction ledger for the main network. diff --git a/config.go b/config.go new file mode 100644 index 0000000..2d6325b --- /dev/null +++ b/config.go @@ -0,0 +1,736 @@ +// Copyright (c) 2013-2017 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package main + +import ( + "blockchain/chaincfg" + "fmt" + "net" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + // "github.com/btcsuite/btcd/blockchain" + //"//github.com/btcsuite/btcd/chaincfg" + //"github.com/btcsuite/btcd/chaincfg/chainhash" + // "github.com/btcsuite/btcd/database" + //_ "github.com/btcsuite/btcd/database/ffldb" + // "github.com/btcsuite/btcd/mempool" + //"github.com/btcsuite/btcd/peer" + //"github.com/btcsuite/btcd/wire" + //"github.com/btcsuite/btcutil" + + log "github.com/golang/glog" + flags "github.com/jessevdk/go-flags" +) + +const ( + defaultConfigFilename = "blockchain.conf" + defaultDataDirname = "data" + defaultLogLevel = "info" + defaultLogDirname = "logs" + defaultLogFilename = "blockchain.log" + defaultMaxPeers = 125 + defaultBanDuration = time.Hour * 24 + defaultBanThreshold = 100 + defaultConnectTimeout = time.Second * 30 + defaultMaxRPCClients = 10 + defaultMaxRPCWebsockets = 25 + defaultMaxRPCConcurrentReqs = 20 + defaultDbType = "ffldb" + defaultFreeTxRelayLimit = 15.0 + //defaultTrickleInterval = peer.DefaultTrickleInterval + defaultBlockMinSize = 0 + defaultBlockMaxSize = 750000 + defaultBlockMinWeight = 0 + defaultBlockMaxWeight = 3000000 + blockMaxSizeMin = 1000 + //blockMaxSizeMax = blockchain.MaxBlockBaseSize - 1000 + blockMaxWeightMin = 4000 + //blockMaxWeightMax = blockchain.MaxBlockWeight - 
4000 + defaultGenerate = false + defaultMaxOrphanTransactions = 100 + defaultMaxOrphanTxSize = 100000 + defaultSigCacheMaxSize = 100000 + defaultTxIndex = false + defaultAddrIndex = false +) + +var ( + defaultHomeDir = "." + defaultConfigFile = filepath.Join(defaultHomeDir, defaultConfigFilename) + defaultDataDir = filepath.Join(defaultHomeDir, defaultDataDirname) + //knownDbTypes = database.SupportedDrivers() + defaultRPCKeyFile = filepath.Join(defaultHomeDir, "rpc.key") + defaultRPCCertFile = filepath.Join(defaultHomeDir, "rpc.cert") + defaultLogDir = filepath.Join(defaultHomeDir, defaultLogDirname) +) + +// params is used to group parameters for various networks such as the main +// network and test networks. +type params struct { + *chaincfg.Params + rpcPort string +} + +// activeNetParams is a pointer to the parameters specific to the +// currently active network. +var activeNetParams = &mainNetParams + +// mainNetParams contains parameters specific to the main network +// (wire.MainNet). NOTE: The RPC port is intentionally different than the +// reference implementation because btcd does not handle wallet requests. The +// separate wallet process listens on the well-known port and forwards requests +// it does not handle on to btcd. This approach allows the wallet process +// to emulate the full reference implementation RPC API. +var mainNetParams = params{ + Params: &chaincfg.MainNetParams, + rpcPort: "8334", +} + +// minUint32 is a helper function to return the minimum of two uint32s. +// This avoids a math import and the need to cast to floats. +func minUint32(a, b uint32) uint32 { + if a < b { + return a + } + return b +} + +// config defines the configuration options for btcd. +// +// See loadConfig for details on the configuration load process. +type config struct { + AddCheckpoints []string `long:"addcheckpoint" description:"Add a custom checkpoint. 
 Format: '<height>:<hash>'"`
+	AddPeers []string `short:"a" long:"addpeer" description:"Add a peer to connect with at startup"`
+	AddrIndex bool `long:"addrindex" description:"Maintain a full address-based transaction index which makes the searchrawtransactions RPC available"`
+	AgentBlacklist []string `long:"agentblacklist" description:"A comma separated list of user-agent substrings which will cause btcd to reject any peers whose user-agent contains any of the blacklisted substrings."`
+	AgentWhitelist []string `long:"agentwhitelist" description:"A comma separated list of user-agent substrings which will cause btcd to require all peers' user-agents to contain one of the whitelisted substrings. The blacklist is applied before the whitelist, and an empty whitelist will allow all agents that do not fail the blacklist."`
+	BanDuration time.Duration `long:"banduration" description:"How long to ban misbehaving peers. Valid time units are {s, m, h}. Minimum 1 second"`
+	BanThreshold uint32 `long:"banthreshold" description:"Maximum allowed ban score before disconnecting and banning misbehaving peers."`
+	BlockMaxSize uint32 `long:"blockmaxsize" description:"Maximum block size in bytes to be used when creating a block"`
+	BlockMinSize uint32 `long:"blockminsize" description:"Minimum block size in bytes to be used when creating a block"`
+	BlockMaxWeight uint32 `long:"blockmaxweight" description:"Maximum block weight to be used when creating a block"`
+	BlockMinWeight uint32 `long:"blockminweight" description:"Minimum block weight to be used when creating a block"`
+	BlockPrioritySize uint32 `long:"blockprioritysize" description:"Size in bytes for high-priority/low-fee transactions when creating a block"`
+	BlocksOnly bool `long:"blocksonly" description:"Do not accept transactions from remote peers."`
+	ConfigFile string `short:"C" long:"configfile" description:"Path to configuration file"`
+	ConnectPeers []string `long:"connect" description:"Connect only to the specified peers at 
startup"` + CPUProfile string `long:"cpuprofile" description:"Write CPU profile to the specified file"` + DataDir string `short:"b" long:"datadir" description:"Directory to store data"` + DbType string `long:"dbtype" description:"Database backend to use for the Block Chain"` + DebugLevel string `short:"d" long:"debuglevel" description:"Logging level for all subsystems {trace, debug, info, warn, error, critical} -- You may also specify =,=,... to set the log level for individual subsystems -- Use show to list available subsystems"` + DropAddrIndex bool `long:"dropaddrindex" description:"Deletes the address-based transaction index from the database on start up and then exits."` + DropCfIndex bool `long:"dropcfindex" description:"Deletes the index used for committed filtering (CF) support from the database on start up and then exits."` + DropTxIndex bool `long:"droptxindex" description:"Deletes the hash-based transaction index from the database on start up and then exits."` + ExternalIPs []string `long:"externalip" description:"Add an ip to the list of local addresses we claim to listen on to peers"` + Generate bool `long:"generate" description:"Generate (mine) bitcoins using the CPU"` + FreeTxRelayLimit float64 `long:"limitfreerelay" description:"Limit relay of transactions with no transaction fee to the given amount in thousands of bytes per minute"` + Listeners []string `long:"listen" description:"Add an interface/port to listen for connections (default all interfaces port: 8333, testnet: 18333)"` + LogDir string `long:"logdir" description:"Directory to log output."` + MaxOrphanTxs int `long:"maxorphantx" description:"Max number of orphan transactions to keep in memory"` + MaxPeers int `long:"maxpeers" description:"Max number of inbound and outbound peers"` + MiningAddrs []string `long:"miningaddr" description:"Add the specified payment address to the list of addresses to use for generated blocks -- At least one address is required if the generate option is set"` + 
MinRelayTxFee float64 `long:"minrelaytxfee" description:"The minimum transaction fee in BTC/kB to be considered a non-zero fee."` + DisableBanning bool `long:"nobanning" description:"Disable banning of misbehaving peers"` + NoCFilters bool `long:"nocfilters" description:"Disable committed filtering (CF) support"` + DisableCheckpoints bool `long:"nocheckpoints" description:"Disable built-in checkpoints. Don't do this unless you know what you're doing."` + DisableDNSSeed bool `long:"nodnsseed" description:"Disable DNS seeding for peers"` + DisableListen bool `long:"nolisten" description:"Disable listening for incoming connections -- NOTE: Listening is automatically disabled if the --connect or --proxy options are used without also specifying listen interfaces via --listen"` + NoPeerBloomFilters bool `long:"nopeerbloomfilters" description:"Disable bloom filtering support"` + NoRelayPriority bool `long:"norelaypriority" description:"Do not require free or low-fee transactions to have high priority for relaying"` + DisableRPC bool `long:"norpc" description:"Disable built-in RPC server -- NOTE: The RPC server is disabled by default if no rpcuser/rpcpass or rpclimituser/rpclimitpass is specified"` + DisableStallHandler bool `long:"nostalldetect" description:"Disables the stall handler system for each peer, useful in simnet/regtest integration tests frameworks"` + DisableTLS bool `long:"notls" description:"Disable TLS for the RPC server -- NOTE: This is only allowed if the RPC server is bound to localhost"` + Profile string `long:"profile" description:"Enable HTTP profiling on given port -- NOTE port must be between 1024 and 65536"` + RejectNonStd bool `long:"rejectnonstd" description:"Reject non-standard transactions regardless of the default settings for the active network."` + RejectReplacement bool `long:"rejectreplacement" description:"Reject transactions that attempt to replace existing transactions within the mempool through the Replace-By-Fee (RBF) signaling 
policy."` + RPCCert string `long:"rpccert" description:"File containing the certificate file"` + RPCKey string `long:"rpckey" description:"File containing the certificate key"` + RPCLimitPass string `long:"rpclimitpass" default-mask:"-" description:"Password for limited RPC connections"` + RPCLimitUser string `long:"rpclimituser" description:"Username for limited RPC connections"` + RPCListeners []string `long:"rpclisten" description:"Add an interface/port to listen for RPC connections (default port: 8334)"` + RPCMaxClients int `long:"rpcmaxclients" description:"Max number of RPC clients for standard connections"` + RPCMaxConcurrentReqs int `long:"rpcmaxconcurrentreqs" description:"Max number of concurrent RPC requests that may be processed concurrently"` + RPCMaxWebsockets int `long:"rpcmaxwebsockets" description:"Max number of RPC websocket connections"` + RPCPass string `short:"P" long:"rpcpass" default-mask:"-" description:"Password for RPC connections"` + RPCUser string `short:"u" long:"rpcuser" description:"Username for RPC connections"` + SigCacheMaxSize uint `long:"sigcachemaxsize" description:"The maximum number of entries in the signature verification cache"` + TrickleInterval time.Duration `long:"trickleinterval" description:"Minimum time between attempts to send new inventory to a connected peer"` + TxIndex bool `long:"txindex" description:"Maintain a full hash-based transaction index which makes all transactions available via the getrawtransaction RPC"` + UserAgentComments []string `long:"uacomment" description:"Comment to add to the user agent -- See BIP 14 for more information."` + ShowVersion bool `short:"V" long:"version" description:"Display version information and exit"` + Whitelists []string `long:"whitelist" description:"Add an IP network or IP that will not be banned. (eg. 
 192.168.1.0/24 or ::1)"`
+	lookup func(string) ([]net.IP, error)
+	dial func(string, string, time.Duration) (net.Conn, error)
+	// addCheckpoints []chaincfg.Checkpoint
+	// miningAddrs []btcutil.Address
+	//minRelayTxFee btcutil.Amount
+	//whitelists []*net.IPNet
+}
+
+// cleanAndExpandPath expands environment variables and leading ~ in the
+// passed path, cleans the result, and returns it.
+func cleanAndExpandPath(path string) string {
+	// Expand initial ~ to OS specific home directory.
+	if strings.HasPrefix(path, "~") {
+		homeDir := filepath.Dir(defaultHomeDir)
+		path = strings.Replace(path, "~", homeDir, 1)
+	}
+
+	// NOTE: The os.ExpandEnv doesn't work with Windows-style %VARIABLE%,
+	// but the variables can still be expanded via POSIX-style $VARIABLE.
+	return filepath.Clean(os.ExpandEnv(path))
+}
+
+// validLogLevel returns whether or not logLevel is a valid debug log level.
+func validLogLevel(logLevel string) bool {
+	switch logLevel {
+	case "trace":
+		fallthrough
+	case "debug":
+		fallthrough
+	case "info":
+		fallthrough
+	case "warn":
+		fallthrough
+	case "error":
+		fallthrough
+	case "critical":
+		return true
+	}
+	return false
+}
+
+// parseAndSetDebugLevels attempts to parse the specified debug level and set
+// the levels accordingly. An appropriate error is returned if anything is
+// invalid.
+func parseAndSetDebugLevels(debugLevel string) error {
+	// When the specified string doesn't have any delimiters, treat it as
+	// the log level for all subsystems.
+	if !strings.Contains(debugLevel, ",") && !strings.Contains(debugLevel, "=") {
+		// Validate debug log level.
+		if !validLogLevel(debugLevel) {
+			str := "The specified debug level [%v] is invalid"
+			return fmt.Errorf(str, debugLevel)
+		}
+
+		// Change the logging level for all subsystems.
+		//setLogLevels(debugLevel)
+
+		return nil
+	}
+
+	// Split the specified string into subsystem/level pairs while detecting
+	// issues and update the log levels accordingly. 
+ for _, logLevelPair := range strings.Split(debugLevel, ",") { + if !strings.Contains(logLevelPair, "=") { + str := "The specified debug level contains an invalid " + + "subsystem/level pair [%v]" + return fmt.Errorf(str, logLevelPair) + } + + // Extract the specified subsystem and log level. + // fields := strings.Split(logLevelPair, "=") + // subsysID, logLevel := fields[0], fields[1] + + // Validate log level. + // if !validLogLevel(logLevel) { + // str := "The specified debug level [%v] is invalid" + // return fmt.Errorf(str, logLevel) + // } + + //setLogLevel(subsysID, logLevel) + } + + return nil +} + +// validDbType returns whether or not dbType is a supported database type. + +// removeDuplicateAddresses returns a new slice with all duplicate entries in +// addrs removed. +func removeDuplicateAddresses(addrs []string) []string { + result := make([]string, 0, len(addrs)) + seen := map[string]struct{}{} + for _, val := range addrs { + if _, ok := seen[val]; !ok { + result = append(result, val) + seen[val] = struct{}{} + } + } + return result +} + +// normalizeAddress returns addr with the passed default port appended if +// there is not already a port specified. +func normalizeAddress(addr, defaultPort string) string { + _, _, err := net.SplitHostPort(addr) + if err != nil { + return net.JoinHostPort(addr, defaultPort) + } + return addr +} + +// normalizeAddresses returns a new slice with all the passed peer addresses +// normalized with the given default port, and all duplicates removed. +func normalizeAddresses(addrs []string, defaultPort string) []string { + for i, addr := range addrs { + addrs[i] = normalizeAddress(addr, defaultPort) + } + + return removeDuplicateAddresses(addrs) +} + +// newCheckpointFromStr parses checkpoints in the ':' format. 
+/*func newCheckpointFromStr(checkpoint string) (chaincfg.Checkpoint, error) {
+	parts := strings.Split(checkpoint, ":")
+	if len(parts) != 2 {
+		return chaincfg.Checkpoint{}, fmt.Errorf("unable to parse "+
+			"checkpoint %q -- use the syntax <height>:<hash>",
+			checkpoint)
+	}
+
+	height, err := strconv.ParseInt(parts[0], 10, 32)
+	if err != nil {
+		return chaincfg.Checkpoint{}, fmt.Errorf("unable to parse "+
+			"checkpoint %q due to malformed height", checkpoint)
+	}
+
+	if len(parts[1]) == 0 {
+		return chaincfg.Checkpoint{}, fmt.Errorf("unable to parse "+
+			"checkpoint %q due to missing hash", checkpoint)
+	}
+	hash, err := chainhash.NewHashFromStr(parts[1])
+	if err != nil {
+		return chaincfg.Checkpoint{}, fmt.Errorf("unable to parse "+
+			"checkpoint %q due to malformed hash", checkpoint)
+	}
+
+	return chaincfg.Checkpoint{
+		Height: int32(height),
+		Hash: hash,
+	}, nil
+}
+
+// parseCheckpoints checks the checkpoint strings for valid syntax
+// ('<height>:<hash>') and parses them to chaincfg.Checkpoint instances.
+func parseCheckpoints(checkpointStrings []string) ([]chaincfg.Checkpoint, error) {
+	if len(checkpointStrings) == 0 {
+		return nil, nil
+	}
+	checkpoints := make([]chaincfg.Checkpoint, len(checkpointStrings))
+	for i, cpString := range checkpointStrings {
+		checkpoint, err := newCheckpointFromStr(cpString)
+		if err != nil {
+			return nil, err
+		}
+		checkpoints[i] = checkpoint
+	}
+	return checkpoints, nil
+}*/
+
+// fileExists reports whether the named file or directory exists.
+func fileExists(name string) bool {
+	if _, err := os.Stat(name); err != nil {
+		if os.IsNotExist(err) {
+			return false
+		}
+	}
+	return true
+}
+
+// newConfigParser returns a new command line flags parser.
+func newConfigParser(cfg *config, options flags.Options) *flags.Parser {
+	parser := flags.NewParser(cfg, options)
+	return parser
+}
+
+// loadConfig initializes and parses the config using a config file and command
+// line options. 
+// +// The configuration proceeds as follows: +// 1) Start with a default config with sane settings +// 2) Pre-parse the command line to check for an alternative config file +// 3) Load configuration file overwriting defaults with any specified options +// 4) Parse CLI options and overwrite/add any specified options +// +// The above results in btcd functioning properly without any config settings +// while still allowing the user to override settings with config files and +// command line options. Command line options always take precedence. +func loadConfig() (*config, []string, error) { + // Default config. + cfg := config{ + ConfigFile: defaultConfigFile, + DebugLevel: defaultLogLevel, + MaxPeers: defaultMaxPeers, + BanDuration: defaultBanDuration, + BanThreshold: defaultBanThreshold, + RPCMaxClients: defaultMaxRPCClients, + RPCMaxWebsockets: defaultMaxRPCWebsockets, + RPCMaxConcurrentReqs: defaultMaxRPCConcurrentReqs, + DataDir: defaultDataDir, + LogDir: defaultLogDir, + DbType: defaultDbType, + RPCKey: defaultRPCKeyFile, + RPCCert: defaultRPCCertFile, + // MinRelayTxFee: mempool.DefaultMinRelayTxFee.ToBTC(), + FreeTxRelayLimit: defaultFreeTxRelayLimit, + // TrickleInterval: defaultTrickleInterval, + BlockMinSize: defaultBlockMinSize, + BlockMaxSize: defaultBlockMaxSize, + BlockMinWeight: defaultBlockMinWeight, + BlockMaxWeight: defaultBlockMaxWeight, + // BlockPrioritySize: mempool.DefaultBlockPrioritySize, + MaxOrphanTxs: defaultMaxOrphanTransactions, + SigCacheMaxSize: defaultSigCacheMaxSize, + Generate: defaultGenerate, + TxIndex: defaultTxIndex, + AddrIndex: defaultAddrIndex, + } + + // Pre-parse the command line options to see if an alternative config + // file or the version flag was specified. Any errors aside from the + // help message error can be ignored here since they will be caught by + // the final parse below. 
+ preCfg := cfg + preParser := newConfigParser(&preCfg, flags.HelpFlag) + _, err := preParser.Parse() + if err != nil { + if e, ok := err.(*flags.Error); ok && e.Type == flags.ErrHelp { + fmt.Fprintln(os.Stderr, err) + return nil, nil, err + } + } + + // Show the version and exit if the version flag was specified. + appName := filepath.Base(os.Args[0]) + appName = strings.TrimSuffix(appName, filepath.Ext(appName)) + usageMessage := fmt.Sprintf("Use %s -h to show usage", appName) + if preCfg.ShowVersion { + //fmt.Println(appName, "version", version()) + os.Exit(0) + } + + // Load additional config from file. + var configFileError error + parser := newConfigParser(&cfg, flags.Default) + if !(preCfg.ConfigFile != defaultConfigFile) { + if _, err := os.Stat(preCfg.ConfigFile); os.IsNotExist(err) { + log.Info("Do not exist") + //err := createDefaultConfigFile(preCfg.ConfigFile) + //if err != nil { + // fmt.Fprintf(os.Stderr, "Error creating a "+ + // "default config file: %v\n", err) + //} + } + + err := flags.NewIniParser(parser).ParseFile(preCfg.ConfigFile) + if err != nil { + if _, ok := err.(*os.PathError); !ok { + fmt.Fprintf(os.Stderr, "Error parsing config "+ + "file: %v\n", err) + fmt.Fprintln(os.Stderr, usageMessage) + return nil, nil, err + } + configFileError = err + log.Error(configFileError) + } + } + + // Parse command line options again to ensure they take precedence. + remainingArgs, err := parser.Parse() + if err != nil { + if e, ok := err.(*flags.Error); !ok || e.Type != flags.ErrHelp { + fmt.Fprintln(os.Stderr, usageMessage) + } + return nil, nil, err + } + + // Set the default policy for relaying non-standard transactions + // according to the default of the active network. The set + // configuration value takes precedence over the default value for the + // selected network. + + // Append the network type to the data directory so it is "namespaced" + // per network. 
In addition to the block database, there are other + // pieces of data that are saved to disk such as address manager state. + // All data is specific to a network, so namespacing the data directory + // means each individual piece of serialized data does not have to + // worry about changing names per network and such. + cfg.DataDir = cleanAndExpandPath(cfg.DataDir) + + // Append the network type to the log directory so it is "namespaced" + // per network in the same fashion as the data directory. + cfg.LogDir = cleanAndExpandPath(cfg.LogDir) + + // Special show command to list supported subsystems and exit. + if cfg.DebugLevel == "show" { + os.Exit(0) + } + + // Parse, validate, and set debug log level(s). + /* if err := parseAndSetDebugLevels(cfg.DebugLevel); err != nil { + err := fmt.Errorf("%s: %v", funcName, err.Error()) + fmt.Fprintln(os.Stderr, err) + fmt.Fprintln(os.Stderr, usageMessage) + return nil, nil, err + }*/ + + // Validate profile port number + if cfg.Profile != "" { + profilePort, err := strconv.Atoi(cfg.Profile) + if err != nil || profilePort < 1024 || profilePort > 65535 { + str := "%s: The profile port must be between 1024 and 65535" + log.Errorf("%s", str) + //fmt.Fprintln(os.Stderr, err) + //fmt.Fprintln(os.Stderr, usageMessage) + return nil, nil, err + } + } + + // Don't allow ban durations that are too short. + if cfg.BanDuration < time.Second { + str := "%s: The banduration option may not be less than 1s -- parsed [%v]" + log.Errorf("%s", str) + //err := fmt.Errorf(str, funcName, cfg.BanDuration) + //fmt.Fprintln(os.Stderr, err) + fmt.Fprintln(os.Stderr, usageMessage) + return nil, nil, err + } + + // --addPeer and --connect do not mix. 
+ if len(cfg.AddPeers) > 0 && len(cfg.ConnectPeers) > 0 { + str := "%s: the --addpeer and --connect options can not be " + + "mixed" + log.Errorf("%s", str) + // err := fmt.Errorf(str, funcName) + //fmt.Fprintln(os.Stderr, err) + fmt.Fprintln(os.Stderr, usageMessage) + return nil, nil, err + } + + // Connect means no DNS seeding. + if len(cfg.ConnectPeers) > 0 { + cfg.DisableDNSSeed = true + } + + // Add the default listener if none were specified. The default + // listener is all addresses on the listen port for the network + // we are to connect to. + + // Check to make sure limited and admin users don't have the same username + + // Limit the max block size to a sane value. + /* if cfg.BlockMaxSize < blockMaxSizeMin || cfg.BlockMaxSize > + blockMaxSizeMax { + + str := "%s: The blockmaxsize option must be in between %d " + + "and %d -- parsed [%d]" + err := fmt.Errorf(str, funcName, blockMaxSizeMin, + blockMaxSizeMax, cfg.BlockMaxSize) + fmt.Fprintln(os.Stderr, err) + fmt.Fprintln(os.Stderr, usageMessage) + return nil, nil, err + } + */ + /*// Limit the max block weight to a sane value. + if cfg.BlockMaxWeight < blockMaxWeightMin || + cfg.BlockMaxWeight > blockMaxWeightMax { + + str := "%s: The blockmaxweight option must be in between %d " + + "and %d -- parsed [%d]" + err := fmt.Errorf(str, funcName, blockMaxWeightMin, + blockMaxWeightMax, cfg.BlockMaxWeight) + fmt.Fprintln(os.Stderr, err) + fmt.Fprintln(os.Stderr, usageMessage) + return nil, nil, err + } + + // Limit the max orphan count to a sane vlue. + if cfg.MaxOrphanTxs < 0 { + str := "%s: The maxorphantx option may not be less than 0 " + + "-- parsed [%d]" + err := fmt.Errorf(str, funcName, cfg.MaxOrphanTxs) + fmt.Fprintln(os.Stderr, err) + fmt.Fprintln(os.Stderr, usageMessage) + return nil, nil, err + } + + // Limit the block priority and minimum block sizes to max block size. 
+ cfg.BlockPrioritySize = minUint32(cfg.BlockPrioritySize, cfg.BlockMaxSize) + cfg.BlockMinSize = minUint32(cfg.BlockMinSize, cfg.BlockMaxSize) + cfg.BlockMinWeight = minUint32(cfg.BlockMinWeight, cfg.BlockMaxWeight) + + switch { + // If the max block size isn't set, but the max weight is, then we'll + // set the limit for the max block size to a safe limit so weight takes + // precedence. + case cfg.BlockMaxSize == defaultBlockMaxSize && + cfg.BlockMaxWeight != defaultBlockMaxWeight: + + cfg.BlockMaxSize = blockchain.MaxBlockBaseSize - 1000 + + // If the max block weight isn't set, but the block size is, then we'll + // scale the set weight accordingly based on the max block size value. + case cfg.BlockMaxSize != defaultBlockMaxSize && + cfg.BlockMaxWeight == defaultBlockMaxWeight: + + cfg.BlockMaxWeight = cfg.BlockMaxSize * blockchain.WitnessScaleFactor + } + + // Look for illegal characters in the user agent comments. + for _, uaComment := range cfg.UserAgentComments { + if strings.ContainsAny(uaComment, "/:()") { + err := fmt.Errorf("%s: The following characters must not "+ + "appear in user agent comments: '/', ':', '(', ')'", + funcName) + fmt.Fprintln(os.Stderr, err) + fmt.Fprintln(os.Stderr, usageMessage) + return nil, nil, err + } + } + + // --txindex and --droptxindex do not mix. + if cfg.TxIndex && cfg.DropTxIndex { + err := fmt.Errorf("%s: the --txindex and --droptxindex "+ + "options may not be activated at the same time", + funcName) + fmt.Fprintln(os.Stderr, err) + fmt.Fprintln(os.Stderr, usageMessage) + return nil, nil, err + } + + // --addrindex and --dropaddrindex do not mix. + if cfg.AddrIndex && cfg.DropAddrIndex { + err := fmt.Errorf("%s: the --addrindex and --dropaddrindex "+ + "options may not be activated at the same time", + funcName) + fmt.Fprintln(os.Stderr, err) + fmt.Fprintln(os.Stderr, usageMessage) + return nil, nil, err + } + + // --addrindex and --droptxindex do not mix. 
+ if cfg.AddrIndex && cfg.DropTxIndex { + err := fmt.Errorf("%s: the --addrindex and --droptxindex "+ + "options may not be activated at the same time "+ + "because the address index relies on the transaction "+ + "index", + funcName) + fmt.Fprintln(os.Stderr, err) + fmt.Fprintln(os.Stderr, usageMessage) + return nil, nil, err + } + + // Check mining addresses are valid and saved parsed versions. + cfg.miningAddrs = make([]btcutil.Address, 0, len(cfg.MiningAddrs)) + for _, strAddr := range cfg.MiningAddrs { + addr, err := btcutil.DecodeAddress(strAddr, activeNetParams.Params) + if err != nil { + str := "%s: mining address '%s' failed to decode: %v" + err := fmt.Errorf(str, funcName, strAddr, err) + fmt.Fprintln(os.Stderr, err) + fmt.Fprintln(os.Stderr, usageMessage) + return nil, nil, err + } + if !addr.IsForNet(activeNetParams.Params) { + str := "%s: mining address '%s' is on the wrong network" + err := fmt.Errorf(str, funcName, strAddr) + fmt.Fprintln(os.Stderr, err) + fmt.Fprintln(os.Stderr, usageMessage) + return nil, nil, err + } + cfg.miningAddrs = append(cfg.miningAddrs, addr) + } + + // Ensure there is at least one mining address when the generate flag is + // set. + if cfg.Generate && len(cfg.MiningAddrs) == 0 { + str := "%s: the generate flag is set, but there are no mining " + + "addresses specified " + err := fmt.Errorf(str, funcName) + fmt.Fprintln(os.Stderr, err) + fmt.Fprintln(os.Stderr, usageMessage) + return nil, nil, err + } + + // Add default port to all listener addresses if needed and remove + // duplicate addresses. + cfg.Listeners = normalizeAddresses(cfg.Listeners, + activeNetParams.DefaultPort) + + // Add default port to all rpc listener addresses if needed and remove + // duplicate addresses. + cfg.RPCListeners = normalizeAddresses(cfg.RPCListeners, + activeNetParams.rpcPort) + + // Only allow TLS to be disabled if the RPC is bound to localhost + // addresses. 
+ if !cfg.DisableRPC && cfg.DisableTLS { + allowedTLSListeners := map[string]struct{}{ + "localhost": {}, + "127.0.0.1": {}, + "::1": {}, + } + for _, addr := range cfg.RPCListeners { + host, _, err := net.SplitHostPort(addr) + if err != nil { + str := "%s: RPC listen interface '%s' is " + + "invalid: %v" + err := fmt.Errorf(str, funcName, addr, err) + fmt.Fprintln(os.Stderr, err) + fmt.Fprintln(os.Stderr, usageMessage) + return nil, nil, err + } + if _, ok := allowedTLSListeners[host]; !ok { + str := "%s: the --notls option may not be used " + + "when binding RPC to non localhost " + + "addresses: %s" + err := fmt.Errorf(str, funcName, addr) + fmt.Fprintln(os.Stderr, err) + fmt.Fprintln(os.Stderr, usageMessage) + return nil, nil, err + } + } + } + + // Add default port to all added peer addresses if needed and remove + // duplicate addresses. + cfg.AddPeers = normalizeAddresses(cfg.AddPeers, + activeNetParams.DefaultPort) + cfg.ConnectPeers = normalizeAddresses(cfg.ConnectPeers, + activeNetParams.DefaultPort) + + // Check the checkpoints for syntax errors. + cfg.addCheckpoints, err = parseCheckpoints(cfg.AddCheckpoints) + if err != nil { + str := "%s: Error parsing checkpoints: %v" + err := fmt.Errorf(str, funcName, err) + fmt.Fprintln(os.Stderr, err) + fmt.Fprintln(os.Stderr, usageMessage) + return nil, nil, err + } + + // Setup dial and DNS resolution (lookup) functions depending on the + // specified options. The default is to use the standard + // net.DialTimeout function as well as the system DNS resolver. When a + // proxy is specified, the dial function is set to the proxy specific + // dial function and the lookup is set to use tor (unless --noonion is + // specified in which case the system DNS resolver is used). + cfg.dial = net.DialTimeout + cfg.lookup = net.LookupIP + + // Warn about missing config file only after all other configuration is + // done. This prevents the warning on help messages and invalid + // options. 
Note this should go directly before the return. + if configFileError != nil { + btcdLog.Warnf("%v", configFileError) + } + */ + return &cfg, remainingArgs, nil +} diff --git a/docs/mamma-audiobook.md b/docs/mamma-audiobook.md new file mode 100644 index 0000000..7544e1e --- /dev/null +++ b/docs/mamma-audiobook.md @@ -0,0 +1,44 @@ +# Skapa en Ljudbok + +## Utrustning + +- mac +- AKG + +## Program + +GarageBand + +## Process + +Video: https://www.youtube.com/watch?v=VeHMtoOn6Lg +Artikel: https://robdircks.com/yes-you-can-record-your-own-audiobook-heres-how/ +Ladda hem garageband settings. + +1. Skapa ACX account - amazon platform( part of audible ) +2. Mindre rum och om du kan minimera ljud. Inte vid väg och draperier osv att inte ha med eko. +3. + +## Inställningar + +a) +Master setting = +output -> +compression +limiter -> -3 db +noise gate -> 64db + +b) +ljudkurva ( radio röst ) + +c) speak clearly, few mistakes as possible 6-8 inces + +d) inte massa läpp ljud. Ta paus om måste slicka läpparna + +e) pop filter. breath filter. Ta up "pop pop" + +## Edit + +1. Ta bort andetag ( ser ut som raka streck ) om de låter dåliga. Om låter ok, kan spara. +2. MP3 _. 
192kbit/s + diff --git a/go.mod b/go.mod index 1d47f14..e6c9caf 100644 --- a/go.mod +++ b/go.mod @@ -4,5 +4,9 @@ go 1.17 require ( github.com/davecgh/go-spew v1.1.1 // indirect + github.com/golang/glog v1.0.0 // indirect github.com/gorilla/mux v1.8.0 // indirect + github.com/jessevdk/go-flags v1.5.0 // indirect + golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4 // indirect + gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) diff --git a/go.sum b/go.sum index ee3710e..aa68186 100644 --- a/go.sum +++ b/go.sum @@ -1,4 +1,13 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc= +github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4 h1:EZ2mChiOa8udjfp6rRmswTbtZN/QzUQp4ptM4rnjHvc= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/main b/main new file mode 100755 index 0000000..467159a Binary files /dev/null and b/main differ diff --git a/main.go b/main.go new file mode 100644 index 0000000..774054a --- /dev/null +++ b/main.go @@ -0,0 +1,50 @@ +package main + 
+import ( + log "github.com/golang/glog" +) + +var ( + cfg *config +) + +func main() { + // Load configuration and parse command line. This function also + // initializes logging and configures it accordingly. + tcfg, _, err := loadConfig() + if err != nil { + return + } + cfg = tcfg + + // Get a channel that will be closed when a shutdown signal has been + // triggered either from an OS signal such as SIGINT (Ctrl+C) or from + // another subsystem such as the RPC server. + interrupt := interruptListener() + + // Create server and start it. + server, err := newServer(activeNetParams.Params, interrupt) + if err != nil { + // TODO: this logging could do with some beautifying. + log.Errorf("Unable to start server on %v: %v", cfg.Listeners, err) + return + } + defer func() { + log.Info("Gracefully shutting down the server...") + // server.Stop() + //server.WaitForShutdown() + log.Info("Server shutdown complete") + }() + server.Start() + //if serverChan != nil { + // serverChan <- server + //} + + // Wait until the interrupt signal is received from an OS signal or + // shutdown is requested through one of the subsystems such as the RPC + // server. + <-interrupt + return + //mutex := &sync.Mutex{} + +} diff --git a/mining/generator.go b/mining/generator.go new file mode 100644 index 0000000..289a571 --- /dev/null +++ b/mining/generator.go @@ -0,0 +1,225 @@ +package mining + +import ( + "blockchain/blockchain" + "fmt" + "strings" + "sync" +) + +// NewBlockTemplate returns a new block template that is ready to be solved +// using the transactions from the passed transaction source pool and a coinbase +// that either pays to the passed address if it is not nil, or a coinbase that +// is redeemable by anyone if the passed address is nil. 
The nil address +// functionality is useful since there are cases such as the getblocktemplate +// RPC where external mining software is responsible for creating their own +// coinbase which will replace the one generated for the block template. Thus +// the need to have configured address can be avoided. +// +// The transactions selected and included are prioritized according to several +// factors. First, each transaction has a priority calculated based on its +// value, age of inputs, and size. Transactions which consist of larger +// amounts, older inputs, and small sizes have the highest priority. Second, a +// fee per kilobyte is calculated for each transaction. Transactions with a +// higher fee per kilobyte are preferred. Finally, the block generation related +// policy settings are all taken into account. +// +// Transactions which only spend outputs from other transactions already in the +// block chain are immediately added to a priority queue which either +// prioritizes based on the priority (then fee per kilobyte) or the fee per +// kilobyte (then priority) depending on whether or not the BlockPrioritySize +// policy setting allots space for high-priority transactions. Transactions +// which spend outputs from other transactions in the source pool are added to a +// dependency map so they can be added to the priority queue once the +// transactions they depend on have been included. +// +// Once the high-priority area (if configured) has been filled with +// transactions, or the priority falls below what is considered high-priority, +// the priority queue is updated to prioritize by fees per kilobyte (then +// priority). +// +// When the fees per kilobyte drop below the TxMinFreeFee policy setting, the +// transaction will be skipped unless the BlockMinSize policy setting is +// nonzero, in which case the block will be filled with the low-fee/free +// transactions until the block size reaches that minimum size. 
+// +// Any transactions which would cause the block to exceed the BlockMaxSize +// policy setting, exceed the maximum allowed signature operations per block, or +// otherwise cause the block to be invalid are skipped. +// +// Given the above, a block generated by this function is of the following form: +// +// ----------------------------------- -- -- +// | Coinbase Transaction | | | +// |-----------------------------------| | | +// | | | | ----- policy.BlockPrioritySize +// | High-priority Transactions | | | +// | | | | +// |-----------------------------------| | -- +// | | | +// | | | +// | | |--- policy.BlockMaxSize +// | Transactions prioritized by fee | | +// | until <= policy.TxMinFreeFee | | +// | | | +// | | | +// | | | +// |-----------------------------------| | +// | Low-fee/Non high-priority (free) | | +// | transactions (while block size | | +// | <= policy.BlockMinSize) | | +// ----------------------------------- -- +func (g *BlkTmplGenerator) NewBlockTemplate(payToAddress string) (*BlockTemplate, error) { + // Extend the most recently known best block. + best := g.chain.BestSnapshot() + nextBlockHeight := best.Height + 1 + _ = nextBlockHeight + + // Create a standard coinbase transaction paying to the provided + // address. NOTE: The coinbase value will be updated to include the + // fees from the selected transactions later after they have actually + // been selected. It is created here to detect any errors early + // before potentially doing a lot of work below. The extra nonce helps + // ensure the transaction is not a duplicate transaction (paying the + // same value to the same public key address would otherwise be an + // identical transaction for block version 1). 
+ /*extraNonce := uint64(0) + coinbaseScript, err := standardCoinbaseScript(nextBlockHeight, extraNonce) + if err != nil { + return nil, err + } + coinbaseTx, err := createCoinbaseTx(g.chainParams, coinbaseScript, + nextBlockHeight, payToAddress) + if err != nil { + return nil, err + } + coinbaseSigOpCost := int64(blockchain.CountSigOps(coinbaseTx)) * blockchain.WitnessScaleFactor + */ + // Get the current source transactions and create a priority queue to + // hold the transactions which are ready for inclusion into a block + // along with some priority related and fee metadata. Reserve the same + // number of items that are available for the priority queue. Also, + // choose the initial sort order for the priority queue based on whether + // or not there is an area allocated for high-priority transactions. + //sourceTxns := g.txSource.MiningDescs() + //sortedByFee := g.policy.BlockPrioritySize == 0 + //priorityQueue := newTxPriorityQueue(len(sourceTxns), sortedByFee) + + // Create a slice to hold the transactions to be included in the + // generated block with reserved space. Also create a utxo view to + // house all of the input transactions so multiple lookups can be + // avoided. + //blockTxns := make([]*btcutil.Tx, 0, len(sourceTxns)) + //blockTxns = append(blockTxns, coinbaseTx) + //blockUtxos := blockchain.NewUtxoViewpoint() + + // dependers is used to track transactions which depend on another + // transaction in the source pool. This, in conjunction with the + // dependsOn map kept with each dependent transaction helps quickly + // determine which dependent transactions are now eligible for inclusion + // in the block once each transaction has been included. + //dependers := make(map[chainhash.Hash]map[chainhash.Hash]*txPrioItem) + + // Create slices to hold the fees and number of signature operations + // for each of the selected transactions and add an entry for the + // coinbase. 
This allows the code below to simply append details about + // a transaction as it is selected for inclusion in the final block. + // However, since the total fees aren't known yet, use a dummy value for + // the coinbase fee which will be updated later. + //txFees := make([]int64, 0, len(sourceTxns)) + //txSigOpCosts := make([]int64, 0, len(sourceTxns)) + //txFees = append(txFees, -1) // Updated once known + //txSigOpCosts = append(txSigOpCosts, coinbaseSigOpCost) + + //log.Debugf("Considering %d transactions for inclusion to new block") + fmt.Println("Considering for inclusing in block") + //len(sourceTxns)) + + return &BlockTemplate{ + //Block: &Block, + // Fees: txFees, + //SigOpCosts: txSigOpCosts, + //Height: nextBlockHeight, + //ValidPayAddress: payToAddress != nil, + //WitnessCommitment: witnessCommitment, + }, nil +} + +// BestSnapshot returns information about the current best chain block and +// related state as of the current point in time using the chain instance +// associated with the block template generator. The returned state must be +// treated as immutable since it is shared by all callers. +// +// This function is safe for concurrent access. +func (g *BlkTmplGenerator) BestSnapshot() *blockchain.BestState { + return g.chain.BestSnapshot() +} + +//var Blockchain []Block +var height uint32 // height in chain. 
+
+// mutex guards shared chain state during mining.
+// NOTE(review): nothing visible in this file locks it yet — confirm it is
+// still needed.
+var mutex = &sync.Mutex{}
+
+// IsBlockValid reports whether newBlock is a valid successor of oldBlock.
+//
+// NOTE(review): this is currently a stub that accepts every block; the
+// index/prev-hash/hash checks from the original block package were removed.
+// TODO: restore real validation before relying on this for consensus.
+func IsBlockValid(newBlock, oldBlock blockchain.Block) bool {
+
+	return true
+}
+
+// isHashValid reports whether hash meets the proof-of-work target, where
+// difficulty is the required number of leading zero characters.
+//
+// NOTE(review): the raw digest bytes are compared against the ASCII
+// character '0' (0x30), not against a hex encoding of the hash — presumably
+// the hash should be hex-encoded before this check; confirm the intended
+// difficulty semantics.
+func isHashValid(hash []byte, difficulty uint32) bool {
+	prefix := strings.Repeat("0", int(difficulty))
+	return strings.HasPrefix(string(hash[:]), prefix)
+}
+
+/*func GenerateBlock(oldBlock Block, tx []transaction.Transaction) Block {
+	var newBlock Block
+
+	t := time.Now()
+
+	newBlock.Timestamp = t.String()
+	newBlock.Transactions = tx
+
+	// generate block hash of old block header
+	oldBlockHash := calculateHash(oldBlock)
+	newBlock.PrevHash = oldBlockHash
+
+	newBlock.Difficulty = difficulty
+
+	for i := 0; ; i++ {
+		// increase nonce until hash is valid.
+		newBlock.Nonce = uint32(i)
+		if !isHashValid(calculateHash(newBlock), newBlock.Difficulty) {
+			//fmt.Println(calculateHash(newBlock), " do more work!")
+			h := calculateHash(newBlock)
+			fmt.Println(hex.EncodeToString(h) + " do more work!")
+			time.Sleep(time.Second)
+			continue
+		} else {
+			h := calculateHash(newBlock)
+			fmt.Println(hex.EncodeToString(h) + "Work done. submit block")
+			break
+		}
+
+	}
+	//fmt.Println(newBlock)
+	return newBlock
+}*/
+
+//calculates the block header sha256 hash.
+/*func calculateHash(block Block) []byte {
+	bVersion := util.Uinttobyte(block.Version)
+	bNonce := util.Uinttobyte(block.Nonce)
+	bDifficulty := util.Uinttobyte(block.Difficulty)
+
+	record := []byte{}
+	record = append(record, bVersion[:]...)
+	record = append(record, block.PrevHash[:]...)
+	record = append(record, bNonce[:]...)
+	record = append(record, []byte(block.Timestamp)[:]...)
+	record = append(record, bDifficulty[:]...)
+ + h := sha256.New() + h.Write([]byte(record)) + hashed := h.Sum(nil) + //fmt.Println(hex.EncodeToString(hashed)) + return hashed +}*/ diff --git a/mining/mining.go b/mining/mining.go new file mode 100644 index 0000000..174796f --- /dev/null +++ b/mining/mining.go @@ -0,0 +1,437 @@ +package mining + +import ( + "blockchain/blockchain" + "blockchain/chaincfg" + "fmt" + "math/rand" + "runtime" + "sync" + "time" + + log "github.com/golang/glog" +) + +const ( + // maxNonce is the maximum value a nonce can be in a block header. + maxNonce = ^uint32(0) // 2^32 - 1 + + // maxExtraNonce is the maximum value an extra nonce used in a coinbase + // transaction can be. + // maxExtraNonce = ^uint64(0) // 2^64 - 1 + + // hpsUpdateSecs is the number of seconds to wait in between each + // update to the hashes per second monitor. + // hpsUpdateSecs = 10 + + // hashUpdateSec is the number of seconds each worker waits in between + // notifying the speed monitor with how many hashes have been completed + // while they are actively searching for a solution. This is done to + // reduce the amount of syncs between the workers that must be done to + // keep track of the hashes per second. + hashUpdateSecs = 15 +) + +var ( + // defaultNumWorkers is the default number of workers to use for mining + // and is based on the number of processor cores. This helps ensure the + // system stays reasonably responsive under heavy load. + defaultNumWorkers = uint32(runtime.NumCPU()) +) + +// Config is a descriptor containing the cpu miner configuration. +type Config struct { + // ChainParams identifies which chain parameters the cpu miner is + // associated with. + ChainParams *chaincfg.Params + + // BlockTemplateGenerator identifies the instance to use in order to + // generate block templates that the miner will attempt to solve. + BlockTemplateGenerator *BlkTmplGenerator + + // MiningAddrs is a list of payment addresses to use for the generated + // blocks. 
Each generated block will randomly choose one of them. + //MiningAddrs []btcutil.Address + + // ProcessBlock defines the function to call with any solved blocks. + // It typically must run the provided block through the same set of + // rules and handling as any other block coming from the network. + //ProcessBlock func(*btcutil.Block, blockchain.BehaviorFlags) (bool, error) + + // ConnectedCount defines the function to use to obtain how many other + // peers the server is connected to. This is used by the automatic + // persistent mining routine to determine whether or it should attempt + // mining. This is useful because there is no point in mining when not + // connected to any peers since there would no be anyone to send any + // found blocks to. + ConnectedCount func() int32 + + // IsCurrent defines the function to use to obtain whether or not the + // block chain is current. This is used by the automatic persistent + // mining routine to determine whether or it should attempt mining. + // This is useful because there is no point in mining if the chain is + // not current since any solved blocks would be on a side chain and and + // up orphaned anyways. + IsCurrent func() bool +} + +// BlockTemplate houses a block that has yet to be solved along with additional +// details about the fees and the number of signature operations for each +// transaction in the block. +type BlockTemplate struct { + // Block is a block that is ready to be solved by miners. Thus, it is + // completely valid with the exception of satisfying the proof-of-work + // requirement. + Block *blockchain.Block + + // Fees contains the amount of fees each transaction in the generated + // template pays in base units. Since the first transaction is the + // coinbase, the first entry (offset 0) will contain the negative of the + // sum of the fees of all other transactions. 
+ //Fees []int64 + + // SigOpCosts contains the number of signature operations each + // transaction in the generated template performs. + //SigOpCosts []int64 + + // Height is the height at which the block template connects to the main + // chain. + Height int32 + + // ValidPayAddress indicates whether or not the template coinbase pays + // to an address or is redeemable by anyone. See the documentation on + // NewBlockTemplate for details on which this can be useful to generate + // templates without a coinbase payment address. + ValidPayAddress bool + + // WitnessCommitment is a commitment to the witness data (if any) + // within the block. This field will only be populted once segregated + // witness has been activated, and the block contains a transaction + // which has witness data. + //WitnessCommitment []byte +} + +// Miner provides facilities for solving blocks (mining) using the CPU in +// a concurrency-safe manner. It consists of two main goroutines -- a speed +// monitor and a controller for worker goroutines which generate and solve +// blocks. The number of goroutines can be set via the SetMaxGoRoutines +// function, but the default is based on the number of processor cores in the +// system which is typically sufficient. +type Miner struct { + sync.Mutex + //g *mining.BlkTmplGenerator + g *BlkTmplGenerator + cfg Config + numWorkers uint32 + started bool + // discreteMining bool + submitBlockLock sync.Mutex + wg sync.WaitGroup + workerWg sync.WaitGroup + updateNumWorkers chan struct{} + queryHashesPerSec chan float64 + updateHashes chan uint64 + // speedMonitorQuit chan struct{} + quit chan struct{} +} + +// BlkTmplGenerator provides a type that can be used to generate block templates +// based on a given mining policy and source of transactions to choose from. +// It also houses additional state required in order to ensure the templates +// are built on top of the current best chain and adhere to the consensus rules. 
+type BlkTmplGenerator struct { + //policy *Policy + //chainParams *chaincfg.Params + //txSource TxSource + chain *blockchain.Blockchain + //timeSource blockchain.MedianTimeSource + //sigCache *txscript.SigCache + //hashCache *txscript.HashCache +} + +// NewBlkTmplGenerator returns a new block template generator for the given +// policy using transactions from the provided transaction source. +// +// The additional state-related fields are required in order to ensure the +// templates are built on top of the current best chain and adhere to the +// consensus rules. +/*func NewBlkTmplGenerator(policy *Policy, params *chaincfg.Params, +txSource TxSource, chain *blockchain.BlockChain, +timeSource blockchain.MedianTimeSource, +sigCache *txscript.SigCache, +hashCache *txscript.HashCache) *BlkTmplGenerator { +*/ +func NewBlkTmplGenerator(policy *Policy, params *chaincfg.Params, chain *blockchain.Blockchain) *BlkTmplGenerator { + return &BlkTmplGenerator{ + chain: chain, + } +} + +// Start begins the mining proces. +// +// This function is safe for concurrent access. +func (m *Miner) Start() { + m.Lock() + defer m.Unlock() + + if m.started { + return + } + + m.quit = make(chan struct{}) + m.wg.Add(1) + go m.miningWorkerController() + + m.started = true + log.Info("miner started") +} + +// miningWorkerController launches the worker goroutines that are used to +// generate block templates and solve them. It also provides the ability to +// dynamically adjust the number of running worker goroutines. +// +// It must be run as a goroutine. +func (m *Miner) miningWorkerController() { + // launchWorkers groups common code to launch a specified number of + // workers for generating blocks. 
+	var runningWorkers []chan struct{}
+	launchWorkers := func(numWorkers uint32) {
+		for i := uint32(0); i < numWorkers; i++ {
+			quit := make(chan struct{})
+			runningWorkers = append(runningWorkers, quit)
+
+			m.workerWg.Add(1)
+			go m.generateBlocks(quit)
+		}
+	}
+
+	// Launch the current number of workers by default.
+	runningWorkers = make([]chan struct{}, 0, m.numWorkers)
+	launchWorkers(m.numWorkers)
+
+out:
+	for {
+		select {
+		// Update the number of running workers.
+		case <-m.updateNumWorkers:
+			// No change.
+			numRunning := uint32(len(runningWorkers))
+			if m.numWorkers == numRunning {
+				continue
+			}
+
+			// Add new workers.
+			if m.numWorkers > numRunning {
+				launchWorkers(m.numWorkers - numRunning)
+				continue
+			}
+
+			// Signal the most recently created goroutines to exit.
+			//
+			// NOTE(review): i is a uint32, so if m.numWorkers can ever be 0
+			// the condition i >= m.numWorkers is always true and i-- wraps
+			// around after index 0 is closed, causing an out-of-range panic.
+			// Confirm the worker count is clamped to at least 1 wherever it
+			// is set.
+			for i := numRunning - 1; i >= m.numWorkers; i-- {
+				close(runningWorkers[i])
+				runningWorkers[i] = nil
+				runningWorkers = runningWorkers[:i]
+			}
+
+		case <-m.quit:
+			for _, quit := range runningWorkers {
+				close(quit)
+			}
+			break out
+		}
+	}
+
+	// Wait until all workers shut down to stop the speed monitor since
+	// they rely on being able to send updates to it.
+	m.workerWg.Wait()
+	//close(m.speedMonitorQuit)
+	m.wg.Done()
+}
+
+// generateBlocks is a worker that is controlled by the miningWorkerController.
+// It is self contained in that it creates block templates and attempts to solve
+// them while detecting when it is performing stale work and reacting
+// accordingly by generating a new block template. When a block is solved, it
+// is submitted.
+//
+// It must be run as a goroutine.
+func (m *Miner) generateBlocks(quit chan struct{}) {
+	log.Info("Starting generate blocks worker")
+
+	// Start a ticker which is used to signal checks for stale work and
+	// updates to the speed monitor.
+	ticker := time.NewTicker(time.Second * hashUpdateSecs)
+	defer ticker.Stop()
+out:
+	for {
+		// Quit when the miner is stopped.
+		select {
+		case <-quit:
+			break out
+		default:
+			// Non-blocking select to fall through
+		}
+
+		// Wait until there is a connection to at least one other peer
+		// since there is no way to relay a found block or receive
+		// transactions to work on when there are no connected peers.
+		// if m.cfg.ConnectedCount() == 0 {
+		// 	time.Sleep(time.Second)
+		// 	continue
+		// }
+
+		// No point in searching for a solution before the chain is
+		// synced. Also, grab the same lock as used for block
+		// submission, since the current block will be changing and
+		// this would otherwise end up building a new block template on
+		// a block that is in the process of becoming stale.
+		m.submitBlockLock.Lock()
+		curHeight := m.g.BestSnapshot().Height
+		if curHeight != 0 && !m.cfg.IsCurrent() {
+			m.submitBlockLock.Unlock()
+			time.Sleep(time.Second)
+			continue
+		}
+
+		// Choose a payment address at random.
+		//
+		// NOTE(review): rand.Seed is re-run on every loop iteration; seeding
+		// once at startup is the usual pattern, and per-iteration wall-clock
+		// reseeding weakens the randomness. Harmless while the address is
+		// hard-coded below, but worth fixing before MiningAddrs is restored.
+		rand.Seed(time.Now().UnixNano())
+		//payToAddr := m.cfg.MiningAddrs[rand.Intn(len(m.cfg.MiningAddrs))]
+		payToAddr := "abc123"
+
+		// Create a new block template using the available transactions
+		// in the memory pool as a source of transactions to potentially
+		// include in the block.
+		template, err := m.g.NewBlockTemplate(payToAddr)
+		m.submitBlockLock.Unlock()
+		if err != nil {
+			errStr := fmt.Sprintf("Failed to create new block "+
+				"template: %v", err)
+			// NOTE(review): glog's Fatalf logs the message and then exits
+			// the process, so the continue below is unreachable and a single
+			// template failure kills the whole miner. log.Errorf was
+			// presumably intended here — confirm.
+			log.Fatalf("%s.\n", errStr)
+			continue
+		}
+
+		// Attempt to solve the block. The function will exit early
+		// with false when conditions that trigger a stale block, so
+		// a new block template can be generated. When the return is
+		// true a solution was found, so submit the solved block.
+ if m.solveBlock(template.Block, curHeight+1, ticker, quit) { + // block := btcutil.NewBlock(template.Block) + // TODO + //m.submitBlock(block) + } + } + + m.workerWg.Done() + log.Infof("Generate blocks worker done") +} + +// solveBlock attempts to find some combination of a nonce, extra nonce, and +// current timestamp which makes the passed block hash to a value less than the +// target difficulty. The timestamp is updated periodically and the passed +// block is modified with all tweaks during this process. This means that +// when the function returns true, the block is ready for submission. +// +// This function will return early with false when conditions that trigger a +// stale block such as a new block showing up or periodically when there are +// new transactions and enough time has elapsed without finding a solution. +func (m *Miner) solveBlock(msgBlock *blockchain.Block, blockHeight int32, + ticker *time.Ticker, quit chan struct{}) bool { + + // Choose a random extra nonce offset for this block template and + // worker. + //enOffset, err := wire.RandomUint64() + //if err != nil { + // log.Errorf("Unexpected error while generating random "+ + // "extra nonce offset: %v", err) + // enOffset = 0 + // } + + // Create some convenience variables. + header := &msgBlock.Header + // targetDifficulty := "" + //targetDifficulty := blockchain.CompactToBig(header.Bits) + + // Initial state. + // lastGenerated := time.Now() + //lastTxUpdate := m.g.TxSource().LastUpdated() + hashesCompleted := uint64(0) + + // Note that the entire extra nonce range is iterated and the offset is + // added relying on the fact that overflow will wrap around 0 as + // provided by the Go spec. + // for extraNonce := uint64(0); extraNonce < maxExtraNonce; extraNonce++ { + // Update the extra nonce in the block template with the + // new value by regenerating the coinbase script and + // setting the merkle root to the new value. 
+ // m.g.UpdateExtraNonce(msgBlock, blockHeight, extraNonce+enOffset) + + // Search through the entire nonce range for a solution while + // periodically checking for early quit and stale block + // conditions along with updates to the speed monitor. + for i := uint32(0); i <= maxNonce; i++ { + select { + case <-quit: + return false + + case <-ticker.C: + m.updateHashes <- hashesCompleted + hashesCompleted = 0 + + // The current block is stale if the best block + // has changed. + best := m.g.BestSnapshot() + _ = best + //if !header.PrevHash.IsEqual(&best.Hash) { + // return false + //} + + // The current block is stale if the memory pool + // has been updated since the block template was + // generated and it has been at least one + // minute. + //if lastTxUpdate != m.g.TxSource().LastUpdated() && + // time.Now().After(lastGenerated.Add(time.Minute)) { + + // return false + //} + + //m.g.UpdateBlockTime(msgBlock) + + default: + // Non-blocking select to fall through + } + + // Update the nonce and hash the block header. Each + // hash is a sha256, so + // increment the number of hashes completed for each + // attempt accordingly. + header.Nonce = i + hash := header.BlockHash() + _ = hash + hashesCompleted += 1 + + // The block is solved when the new block hash is less + // than the target difficulty. Yay! + //if blockchain.HashToBig(&hash).Cmp(targetDifficulty) <= 0 { + // m.updateHashes <- hashesCompleted + // return true + // } + } + //} + + return false +} + +// New returns a new instance of a miner for the provided configuration. +// Use Start to begin the mining process. See the documentation for Miner +// type for more details. 
+func New(cfg *Config) *Miner {
+	return &Miner{
+		g:                 cfg.BlockTemplateGenerator,
+		cfg:               *cfg,
+		numWorkers:        defaultNumWorkers,
+		updateNumWorkers:  make(chan struct{}),
+		queryHashesPerSec: make(chan float64),
+		updateHashes:      make(chan uint64),
+	}
+}
diff --git a/mining/policy.go b/mining/policy.go
new file mode 100644
index 0000000..6ddd135
--- /dev/null
+++ b/mining/policy.go
@@ -0,0 +1,42 @@
+// Copyright (c) 2014-2016 The btcsuite developers
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+package mining
+
+const (
+	// UnminedHeight is the height used for the "block" height field of the
+	// contextual transaction information provided in a transaction store
+	// when it has not yet been mined into a block.
+	UnminedHeight = 0x7fffffff
+)
+
+// Policy houses the policy (configuration parameters) which is used to control
+// the generation of block templates. See the documentation for
+// NewBlockTemplate for more details on how each of these parameters is used.
+type Policy struct {
+	// BlockMinWeight is the minimum block weight to be used when
+	// generating a block template.
+	BlockMinWeight uint32
+
+	// BlockMaxWeight is the maximum block weight to be used when
+	// generating a block template.
+	BlockMaxWeight uint32
+
+	// BlockMinSize is the minimum block size to be used when generating
+	// a block template.
+	BlockMinSize uint32
+
+	// BlockMaxSize is the maximum block size to be used when generating a
+	// block template.
+	BlockMaxSize uint32
+
+	// BlockPrioritySize is the size in bytes for high-priority / low-fee
+	// transactions to be used when generating a block template.
+	BlockPrioritySize uint32
+
+	// TxMinFreeFee is the minimum fee in Satoshi/1000 bytes that is
+	// required for a transaction to be treated as free for mining purposes
+	// (block template generation). 
+ //TxMinFreeFee btcutil.Amount +} diff --git a/server.go b/server.go new file mode 100644 index 0000000..6a8cd8c --- /dev/null +++ b/server.go @@ -0,0 +1,434 @@ +package main + +import ( + "blockchain/blockchain" + "blockchain/chaincfg" + "blockchain/mining" + miner "blockchain/mining" + "net" + "sync" + "sync/atomic" + "time" +) + +// server provides a bitcoin server for handling communications to and from +// bitcoin peers. +type server struct { + // The following variables must only be used atomically. + // Putting the uint64s first makes them 64-bit aligned for 32-bit systems. + bytesReceived uint64 // Total bytes received from all peers since start. + bytesSent uint64 // Total bytes sent by all peers since start. + started int32 + shutdown int32 + shutdownSched int32 + startupTime int64 + + chainParams *chaincfg.Params + // addrManager *addrmgr.AddrManager + //connManager *connmgr.ConnManager + //sigCache *txscript.SigCache + //hashCache *txscript.HashCache + //rpcServer *rpcServer + //syncManager *netsync.SyncManager + chain *blockchain.Blockchain + //txMemPool *mempool.TxPool + Miner *miner.Miner + //modifyRebroadcastInv chan interface{} + //newPeers chan *serverPeer + //donePeers chan *serverPeer + //banPeers chan *serverPeer + //query chan interface{} + //relayInv chan relayMsg + //broadcast chan broadcastMsg + //peerHeightsUpdate chan updatePeerHeightsMsg + wg sync.WaitGroup + quit chan struct{} + //nat NAT + //db database.DB + //timeSource blockchain.MedianTimeSource + //services wire.ServiceFlag + // The following fields are used for optional indexes. They will be nil + // if the associated index is not enabled. These fields are set during + // initial creation of the server and never changed afterwards, so they + // do not need to be protected for concurrent access. 
+ //txIndex *indexers.TxIndex + //addrIndex *indexers.AddrIndex + //cfIndex *indexers.CfIndex + // The fee estimator keeps track of how long transactions are left in + // the mempool before they are mined into blocks. + //feeEstimator *mempool.FeeEstimator + // cfCheckptCaches stores a cached slice of filter headers for cfcheckpt + // messages for each filter type. + // cfCheckptCaches map[wire.FilterType][]cfHeaderKV + //cfCheckptCachesMtx sync.RWMutex +} + +// newServer returns a new server configured to listen on addr for the +// network type specified by chainParams. Use start to begin accepting +// connections from peers. +func newServer(chainParams *chaincfg.Params, interrupt <-chan struct{}) (*server, error) { + // db database.DB, chainParams *chaincfg.Params, + //services := defaultServices + //if cfg.NoPeerBloomFilters { + // services &^= wire.SFNodeBloom + // } + // if cfg.NoCFilters { + // services &^= wire.SFNodeCF + // } + // amgr := addrmgr.New(cfg.DataDir, btcdLookup) + var listeners []net.Listener + _ = listeners + // var nat NAT + //var err error + // listeners, nat, err = initListeners(amgr, listenAddrs, services) + //if err != nil { + // return nil, err + //} + //if len(listeners) == 0 { + // return nil, errors.New("no valid listen address") + //} + // if len(agentBlacklist) > 0 { + // srvrLog.Infof("User-agent blacklist %s", agentBlacklist) + // } + // if len(agentWhitelist) > 0 { + // srvrLog.Infof("User-agent whitelist %s", agentWhitelist) + // } + s := server{ + // chainParams: chainParams, + // addrManager: amgr, + // newPeers: make(chan *serverPeer, cfg.MaxPeers), + // donePeers: make(chan *serverPeer, cfg.MaxPeers), + // banPeers: make(chan *serverPeer, cfg.MaxPeers), + // query: make(chan interface{}), + // relayInv: make(chan relayMsg, cfg.MaxPeers), + // broadcast: make(chan broadcastMsg, cfg.MaxPeers), + quit: make(chan struct{}), + // modifyRebroadcastInv: make(chan interface{}), + // peerHeightsUpdate: make(chan 
updatePeerHeightsMsg), + // nat: nat, + // db: db, + // timeSource: blockchain.NewMedianTime(), + //services: services, + // sigCache: txscript.NewSigCache(cfg.SigCacheMaxSize), + // hashCache: txscript.NewHashCache(cfg.SigCacheMaxSize), + // cfCheckptCaches: make(map[wire.FilterType][]cfHeaderKV), + // agentBlacklist: agentBlacklist, + // agentWhitelist: agentWhitelist, + } + // Create the transaction and address indexes if needed. + // + // CAUTION: the txindex needs to be first in the indexes array because + // the addrindex uses data from the txindex during catchup. If the + // addrindex is run first, it may not have the transactions from the + // current block indexed. + // var indexes []indexers.Indexer + // if cfg.TxIndex || cfg.AddrIndex { + // Enable transaction index if address index is enabled since it + // // requires it. + // if !cfg.TxIndex { + // indxLog.Infof("Transaction index enabled because it " + + // "is required by the address index") + // cfg.TxIndex = true + // } else { + // indxLog.Info("Transaction index is enabled") + // } + // s.txIndex = indexers.NewTxIndex(db) + // indexes = append(indexes, s.txIndex) + // } + // if cfg.AddrIndex { + // indxLog.Info("Address index is enabled") + // s.addrIndex = indexers.NewAddrIndex(db, chainParams) + // indexes = append(indexes, s.addrIndex) + // } + // if !cfg.NoCFilters { + // indxLog.Info("Committed filter index is enabled") + // s.cfIndex = indexers.NewCfIndex(db, chainParams) + // indexes = append(indexes, s.cfIndex) + // } + + // Create an index manager if any of the optional indexes are enabled. + // var indexManager blockchain.IndexManager + // if len(indexes) > 0 { + // indexManager = indexers.NewManager(db, indexes) + // } + + // Merge given checkpoints with the default ones unless they are disabled. 
+ // var checkpoints []chaincfg.Checkpoint + // if !cfg.DisableCheckpoints { + // checkpoints = mergeCheckpoints(s.chainParams.Checkpoints, cfg.addCheckpoints) + // } + + // Create a new block chain instance with the appropriate configuration. + var err error + s.chain, err = blockchain.New(&blockchain.Config{ + // DB: s.db, + // Interrupt: interrupt, + ChainParams: s.chainParams, + // Checkpoints: checkpoints, + // TimeSource: s.timeSource, + // SigCache: s.sigCache, + // IndexManager: indexManager, + // HashCache: s.hashCache, + }) + if err != nil { + return nil, err + } + + // Search for a FeeEstimator state in the database. If none can be found + // or if it cannot be loaded, create a new one. + // db.Update(func(tx database.Tx) error { + // metadata := tx.Metadata() + // feeEstimationData := metadata.Get(mempool.EstimateFeeDatabaseKey) + // if feeEstimationData != nil { + // delete it from the database so that we don't try to restore the + // // same thing again somehow. + // metadata.Delete(mempool.EstimateFeeDatabaseKey) + + // If there is an error, log it and make a new fee estimator. + // var err error + // s.feeEstimator, err = mempool.RestoreFeeEstimator(feeEstimationData) + + // if err != nil { + // peerLog.Errorf("Failed to restore fee estimator %v", err) + // } + // } + + // return nil + // }) + + // If no feeEstimator has been found, or if the one that has been found + // is behind somehow, create a new one and start over. 
+ // if s.feeEstimator == nil || s.feeEstimator.LastKnownHeight() != s.chain.BestSnapshot().Height { + // s.feeEstimator = mempool.NewFeeEstimator( + // mempool.DefaultEstimateFeeMaxRollback, + // mempool.DefaultEstimateFeeMinRegisteredBlocks) + // } + + // txC := mempool.Config{ + // Policy: mempool.Policy{ + // DisableRelayPriority: cfg.NoRelayPriority, + // AcceptNonStd: cfg.RelayNonStd, + // FreeTxRelayLimit: cfg.FreeTxRelayLimit, + // MaxOrphanTxs: cfg.MaxOrphanTxs, + // MaxOrphanTxSize: defaultMaxOrphanTxSize, + // MaxSigOpCostPerTx: blockchain.MaxBlockSigOpsCost / 4, + // MinRelayTxFee: cfg.minRelayTxFee, + // MaxTxVersion: 2, + // RejectReplacement: cfg.RejectReplacement, + // }, + // ChainParams: chainParams, + // FetchUtxoView: s.chain.FetchUtxoView, + // BestHeight: func() int32 { return s.chain.BestSnapshot().Height }, + // MedianTimePast: func() time.Time { return s.chain.BestSnapshot().MedianTime }, + // CalcSequenceLock: func(tx *btcutil.Tx, view *blockchain.UtxoViewpoint) (*blockchain.SequenceLock, error) { + // return s.chain.CalcSequenceLock(tx, view, true) + // }, + // IsDeploymentActive: s.chain.IsDeploymentActive, + // SigCache: s.sigCache, + // HashCache: s.hashCache, + // AddrIndex: s.addrIndex, + // FeeEstimator: s.feeEstimator, + // } + //s.txMemPool = mempool.New(&txC) + + /*s.syncManager, err = netsync.New(&netsync.Config{ + PeerNotifier: &s, + Chain: s.chain, + TxMemPool: s.txMemPool, + ChainParams: s.chainParams, + DisableCheckpoints: cfg.DisableCheckpoints, + MaxPeers: cfg.MaxPeers, + FeeEstimator: s.feeEstimator, + }) + if err != nil { + return nil, err + }*/ + + // Create the mining policy and block template generator based on the + // configuration options. + // + // NOTE: The miner relies on the mempool, so the mempool has to be + // created before calling the function to create the miner. 
+ policy := miner.Policy{ + // BlockMinWeight: cfg.BlockMinWeight, + //BlockMaxWeight: cfg.BlockMaxWeight, + //BlockMinSize: cfg.BlockMinSize, + //BlockMaxSize: cfg.BlockMaxSize, + //BlockPrioritySize: cfg.BlockPrioritySize, + // TxMinFreeFee: cfg.minRelayTxFee, + } + blockTemplateGenerator := mining.NewBlkTmplGenerator(&policy, s.chainParams, s.chain) //s.chainParams, s.txMemPool, s.chain, s.timeSource, + //s.sigCache, s.hashCache) + + s.Miner = miner.New(&miner.Config{ + ChainParams: chainParams, + BlockTemplateGenerator: blockTemplateGenerator, + //MiningAddrs: cfg.miningAddrs, + //ProcessBlock: s.syncManager.ProcessBlock, + //ConnectedCount: s.ConnectedCount, + //IsCurrent: s.syncManager.IsCurrent, + }) + + // Only setup a function to return new addresses to connect to when + // not running in connect-only mode. The simulation network is always + // in connect-only mode since it is only intended to connect to + // specified peers and actively avoid advertising and connecting to + // discovered peers in order to prevent it from becoming a public test + // network. + /* var newAddressFunc func() (net.Addr, error) + if !cfg.SimNet && len(cfg.ConnectPeers) == 0 { + newAddressFunc = func() (net.Addr, error) { + for tries := 0; tries < 100; tries++ { + addr := s.addrManager.GetAddress() + if addr == nil { + break + } + + // Address will not be invalid, local or unroutable + // because addrmanager rejects those on addition. + // Just check that we don't already have an address + // in the same group so that we are not connecting + // to the same network segment at the expense of + // others. + key := addrmgr.GroupKey(addr.NetAddress()) + if s.OutboundGroupCount(key) != 0 { + continue + } + + // only allow recent nodes (10mins) after we failed 30 + // times + if tries < 30 && time.Since(addr.LastAttempt()) < 10*time.Minute { + continue + } + + // allow nondefault ports after 50 failed tries. 
+ if tries < 50 && fmt.Sprintf("%d", addr.NetAddress().Port) != + activeNetParams.DefaultPort { + continue + } + + // Mark an attempt for the valid address. + // s.addrManager.Attempt(addr.NetAddress()) + + // addrString := addrmgr.NetAddressKey(addr.NetAddress()) + // return addrStringToNetAddr(addrString) + } + + return nil, errors.New("no valid connect address") + } + } + + // Create a connection manager. + targetOutbound := defaultTargetOutbound + if cfg.MaxPeers < targetOutbound { + targetOutbound = cfg.MaxPeers + } + cmgr, err := connmgr.New(&connmgr.Config{ + Listeners: listeners, + OnAccept: s.inboundPeerConnected, + RetryDuration: connectionRetryInterval, + TargetOutbound: uint32(targetOutbound), + Dial: btcdDial, + OnConnection: s.outboundPeerConnected, + GetNewAddress: newAddressFunc, + }) + if err != nil { + return nil, err + } + s.connManager = cmgr + + // Start up persistent peers. + permanentPeers := cfg.ConnectPeers + if len(permanentPeers) == 0 { + permanentPeers = cfg.AddPeers + } + for _, addr := range permanentPeers { + netAddr, err := addrStringToNetAddr(addr) + if err != nil { + return nil, err + } + + go s.connManager.Connect(&connmgr.ConnReq{ + Addr: netAddr, + Permanent: true, + }) + } + */ + /*if !cfg.DisableRPC { + // Setup listeners for the configured RPC listen addresses and + // TLS settings. 
+ rpcListeners, err := setupRPCListeners() + if err != nil { + return nil, err + } + if len(rpcListeners) == 0 { + return nil, errors.New("RPCS: No valid listen address") + } + + s.rpcServer, err = newRPCServer(&rpcserverConfig{ + Listeners: rpcListeners, + StartupTime: s.startupTime, + ConnMgr: &rpcConnManager{&s}, + SyncMgr: &rpcSyncMgr{&s, s.syncManager}, + TimeSource: s.timeSource, + Chain: s.chain, + ChainParams: chainParams, + DB: db, + TxMemPool: s.txMemPool, + Generator: blockTemplateGenerator, + CPUMiner: s.cpuMiner, + TxIndex: s.txIndex, + AddrIndex: s.addrIndex, + CfIndex: s.cfIndex, + FeeEstimator: s.feeEstimator, + }) + if err != nil { + return nil, err + } + + // Signal process shutdown when the RPC server requests it. + go func() { + <-s.rpcServer.RequestedProcessShutdown() + shutdownRequestChannel <- struct{}{} + }() + }*/ + + return &s, nil +} + +// Start begins accepting connections from peers. +func (s *server) Start() { + // Already started? + if atomic.AddInt32(&s.started, 1) != 1 { + return + } + + //srvrLog.Trace("Starting server") + + // Server startup time. Used for the uptime command for uptime calculation. + s.startupTime = time.Now().Unix() + + // Start the peer handler which in turn starts the address and block + // managers. + s.wg.Add(1) + //go s.peerHandler() + + //if s.nat != nil { + // s.wg.Add(1) + // go s.upnpUpdateThread() + //} + + //if !cfg.DisableRPC { + s.wg.Add(1) + + // Start the rebroadcastHandler, which ensures user tx received by + // the RPC server are rebroadcast until being included in a block. + // go s.rebroadcastHandler() + + // s.rpcServer.Start() + //} + // Start the miner if generation is enabled. 
+ if cfg.Generate { + s.Miner.Start() + } +} diff --git a/signal.go b/signal.go new file mode 100644 index 0000000..95d3c76 --- /dev/null +++ b/signal.go @@ -0,0 +1,74 @@ +// Copyright (c) 2013-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package main + +import ( + "fmt" + "os" + "os/signal" +) + +// shutdownRequestChannel is used to initiate shutdown from one of the +// subsystems using the same code paths as when an interrupt signal is received. +var shutdownRequestChannel = make(chan struct{}) + +// interruptSignals defines the default signals to catch in order to do a proper +// shutdown. This may be modified during init depending on the platform. +var interruptSignals = []os.Signal{os.Interrupt} + +// interruptListener listens for OS Signals such as SIGINT (Ctrl+C) and shutdown +// requests from shutdownRequestChannel. It returns a channel that is closed +// when either signal is received. +func interruptListener() <-chan struct{} { + c := make(chan struct{}) + go func() { + interruptChannel := make(chan os.Signal, 1) + signal.Notify(interruptChannel, interruptSignals...) + + // Listen for initial shutdown signal and close the returned + // channel to notify the caller. + select { + case sig := <-interruptChannel: + fmt.Print(sig) + //btcdLog.Infof("Received signal (%s). Shutting down...", + //sig) + + case <-shutdownRequestChannel: + // btcdLog.Info("Shutdown requested. Shutting down...") + } + close(c) + + // Listen for repeated signals and display a message so the user + // knows the shutdown is in progress and the process is not + // hung. + for { + select { + case sig := <-interruptChannel: + // btcdLog.Infof("Received signal (%s). Already "+ + // "shutting down...", sig) + fmt.Print(sig) + + case <-shutdownRequestChannel: + // btcdLog.Info("Shutdown requested. 
Already " + + // "shutting down...") + } + } + }() + + return c +} + +// interruptRequested returns true when the channel returned by +// interruptListener was closed. This simplifies early shutdown slightly since +// the caller can just use an if statement instead of a select. +func interruptRequested(interrupted <-chan struct{}) bool { + select { + case <-interrupted: + return true + default: + } + + return false +} diff --git a/web/web.go b/web/web.go index 4a2872c..9b49802 100644 --- a/web/web.go +++ b/web/web.go @@ -1,15 +1,12 @@ package web import ( - "blockchain/block" "blockchain/transaction" - "encoding/json" - "io" + "net/http" "time" - "github.com/davecgh/go-spew/spew" "github.com/gorilla/mux" ) @@ -41,12 +38,13 @@ func makeMuxRouter() http.Handler { } func handleGetBlockchain(w http.ResponseWriter, r *http.Request) { - bytes, err := json.MarshalIndent(block.Blockchain, "", " ") + /*bytes, err := json.MarshalIndent(blockchain.Blockchain, "", " ") if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } io.WriteString(w, string(bytes)) + */ } func respondWithJSON(w http.ResponseWriter, r *http.Request, code int, payload interface{}) { @@ -70,6 +68,7 @@ func handleWriteBlock(w http.ResponseWriter, r *http.Request) { o1 := transaction.TxOutput{Value: 100, PubKey: data} tx := transaction.Transaction{[]transaction.TxInput{i1}, []transaction.TxOutput{o1}} + _ = tx /* decoder := json.NewDecoder(r.Body) if err := decoder.Decode(&tx); err != nil { @@ -80,14 +79,15 @@ func handleWriteBlock(w http.ResponseWriter, r *http.Request) { //ensure atomicity when creating new block //mutex.Lock() - newBlock := block.GenerateBlock(block.Blockchain[len(block.Blockchain)-1], []transaction.Transaction{tx}) + // newBlock := blockchain.GenerateBlock(blockchain.Blockchain[len(blockchain.Blockchain)-1], []transaction.Transaction{tx}) //mutex.Unlock() - if block.IsBlockValid(newBlock, block.Blockchain[len(block.Blockchain)-1]) { - block.Blockchain = 
append(block.Blockchain, newBlock) - spew.Dump(block.Blockchain) - } + //if blockchain.IsBlockValid(newBlock, blockchain.Blockchain[len(blockchain.Blockchain)-1]) { + // blockchain.Blockchain = append(blockchain.Blockchain, newBlock) + // spew.Dump(blockchain.Blockchain) + //} - respondWithJSON(w, r, http.StatusCreated, newBlock) + //respondWithJSON(w, r, http.StatusCreated, newBlock) + respondWithJSON(w, r, http.StatusCreated, 200) } diff --git a/yaml.go b/yaml.go new file mode 100644 index 0000000..e86a4b5 --- /dev/null +++ b/yaml.go @@ -0,0 +1,38 @@ +package main + +import ( + "fmt" + "io/ioutil" + "log" + + "gopkg.in/yaml.v3" +) + +type User struct { + Name string + Occupation string +} + +func yamlconfig() { + + yfile, err := ioutil.ReadFile("config/blockchain.yaml") + + if err != nil { + + log.Fatal(err) + } + + data := make(map[string]User) + + err2 := yaml.Unmarshal(yfile, &data) + + if err2 != nil { + + log.Fatal(err2) + } + + for k, v := range data { + + fmt.Printf("%s: %s\n", k, v) + } +}