RandomLoader()
{
BackendMakers().report(new RandomFactory);
- L << Logger::Info << "[randombackend] This is the random backend version " VERSION " reporting" << endl;
+ g_log << Logger::Info << "[randombackend] This is the random backend version " VERSION " reporting" << endl;
}
};
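For context on the pattern repeated throughout this patch: each backend loader registers its factory via BackendMakers().report(...) and announces itself through a global, stream-style logger, which this change renames from L to g_log. The following is a minimal standalone mock of that logger interface, written only to illustrate how expressions like g_log << Logger::Info << "..." << endl and g_log.log(msg, Logger::Error) behave; it is an assumption-laden sketch, not the real PowerDNS Logger class (the urgency prefix, members and output stream here are invented).

// Illustrative mock only -- NOT the real PowerDNS Logger. It sketches the
// stream-style, urgency-tagged global logger that this patch renames from
// `L` to `g_log`.
#include <iostream>
#include <sstream>
#include <string>

struct Logger {
  enum Urgency { Error, Warning, Notice, Info, Debug };

  // Streaming an urgency starts a new message at that level.
  Logger& operator<<(Urgency u) { d_urgency = u; return *this; }

  // Anything else streamable is appended to the buffered message.
  template <class T>
  Logger& operator<<(const T& t) { d_buf << t; return *this; }

  // Streaming endl flushes the buffered message, tagged with its urgency.
  Logger& operator<<(std::ostream& (*)(std::ostream&)) {
    std::cerr << "(" << d_urgency << ") " << d_buf.str() << std::endl;
    d_buf.str("");
    return *this;
  }

  // The patch also uses g_log.log(msg, urgency) in a few places.
  void log(const std::string& msg, Urgency u) { *this << u << msg << std::endl; }

private:
  std::ostringstream d_buf;
  Urgency d_urgency = Info;
};

Logger g_log; // the global instance; the old name was simply `L`

int main() {
  g_log << Logger::Info << "[examplebackend] This is the example backend reporting" << std::endl;
  g_log.log("Query: 'www.example.com|A'", Logger::Error);
  return 0;
}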
resolver.axfr(remote,domain.c_str());
db->startTransaction(domain, domain_id);
- L<<Logger::Error<<"AXFR started for '"<<domain<<"'"<<endl;
+ g_log<<Logger::Error<<"AXFR started for '"<<domain<<"'"<<endl;
Resolver::res_t recs;
while(resolver.axfrChunk(recs)) {
}
db->commitTransaction();
db->setFresh(domain_id);
- L<<Logger::Error<<"AXFR done for '"<<domain<<"'"<<endl;
+ g_log<<Logger::Error<<"AXFR done for '"<<domain<<"'"<<endl;
else {
string msg = "Trying to insert non-zone data, name='"+bdr.qname.toLogString()+"', qtype="+qtype.getName()+", zone='"+bb2.d_name.toLogString()+"'";
if(s_ignore_broken_records) {
- L<<Logger::Warning<<msg<< " ignored" << endl;
+ g_log<<Logger::Warning<<msg<< " ignored" << endl;
return;
}
else
safePutBBDomainInfo(bbd);
- L<<Logger::Warning<<"Zone "<<domainname<< " loaded"<<endl;
+ g_log<<Logger::Warning<<"Zone "<<domainname<< " loaded"<<endl;
return "Loaded zone " + domainname.toLogString() + " from " + filename;
}
{
if(!(maxent))
{
- L<<Logger::Error<<"Zone '"<<bbd.d_name<<"' has too many empty non terminals."<<endl;
+ g_log<<Logger::Error<<"Zone '"<<bbd.d_name<<"' has too many empty non terminals."<<endl;
return;
}
BP.parse(getArg("config"));
}
catch(PDNSException &ae) {
- L<<Logger::Error<<"Error parsing bind configuration: "<<ae.reason<<endl;
+ g_log<<Logger::Error<<"Error parsing bind configuration: "<<ae.reason<<endl;
throw;
}
s_binddirectory=BP.getDirectory();
// ZP.setDirectory(d_binddirectory);
- L<<Logger::Warning<<d_logprefix<<" Parsing "<<domains.size()<<" domain(s), will report when done"<<endl;
+ g_log<<Logger::Warning<<d_logprefix<<" Parsing "<<domains.size()<<" domain(s), will report when done"<<endl;
set<DNSName> oldnames, newnames;
{
++i)
{
if (!(i->hadFileDirective)) {
- L<<Logger::Warning<<d_logprefix<<" Zone '"<<i->name<<"' has no 'file' directive set in "<<getArg("config")<<endl;
+ g_log<<Logger::Warning<<d_logprefix<<" Zone '"<<i->name<<"' has no 'file' directive set in "<<getArg("config")<<endl;
rejected++;
continue;
}
if(i->type == "")
- L<<Logger::Notice<<d_logprefix<<" Zone '"<<i->name<<"' has no type specified, assuming 'native'"<<endl;
+ g_log<<Logger::Notice<<d_logprefix<<" Zone '"<<i->name<<"' has no type specified, assuming 'native'"<<endl;
if(i->type!="master" && i->type!="slave" && i->type != "native" && i->type != "") {
- L<<Logger::Warning<<d_logprefix<<" Warning! Skipping zone '"<<i->name<<"' because type '"<<i->type<<"' is invalid"<<endl;
+ g_log<<Logger::Warning<<d_logprefix<<" Warning! Skipping zone '"<<i->name<<"' because type '"<<i->type<<"' is invalid"<<endl;
rejected++;
continue;
}
newnames.insert(bbd.d_name);
if(filenameChanged || !bbd.d_loaded || !bbd.current()) {
- L<<Logger::Info<<d_logprefix<<" parsing '"<<i->name<<"' from file '"<<i->filename<<"'"<<endl;
+ g_log<<Logger::Info<<d_logprefix<<" parsing '"<<i->name<<"' from file '"<<i->filename<<"'"<<endl;
try {
parseZoneFile(&bbd);
*status+=msg.str();
bbd.d_status=msg.str();
- L<<Logger::Warning<<d_logprefix<<msg.str()<<endl;
+ g_log<<Logger::Warning<<d_logprefix<<msg.str()<<endl;
rejected++;
}
catch(std::system_error &ae) {
if(status)
*status+=msg.str();
bbd.d_status=msg.str();
- L<<Logger::Warning<<d_logprefix<<msg.str()<<endl;
+ g_log<<Logger::Warning<<d_logprefix<<msg.str()<<endl;
rejected++;
}
catch(std::exception &ae) {
*status+=msg.str();
bbd.d_status=msg.str();
- L<<Logger::Warning<<d_logprefix<<msg.str()<<endl;
+ g_log<<Logger::Warning<<d_logprefix<<msg.str()<<endl;
rejected++;
}
safePutBBDomainInfo(bbd);
if(status)
*status=msg.str();
- L<<Logger::Error<<d_logprefix<<msg.str()<<endl;
+ g_log<<Logger::Error<<d_logprefix<<msg.str()<<endl;
}
}
bbnew.d_checknow=false;
bbnew.d_wasRejectedLastReload=false;
safePutBBDomainInfo(bbnew);
- L<<Logger::Warning<<"Zone '"<<bbnew.d_name<<"' ("<<bbnew.d_filename<<") reloaded"<<endl;
+ g_log<<Logger::Warning<<"Zone '"<<bbnew.d_name<<"' ("<<bbnew.d_filename<<") reloaded"<<endl;
}
catch(PDNSException &ae) {
ostringstream msg;
msg<<" error at "+nowTime()+" parsing '"<<bbold.d_name<<"' from file '"<<bbold.d_filename<<"': "<<ae.reason;
- L<<Logger::Warning<<" error parsing '"<<bbold.d_name<<"' from file '"<<bbold.d_filename<<"': "<<ae.reason<<endl;
+ g_log<<Logger::Warning<<" error parsing '"<<bbold.d_name<<"' from file '"<<bbold.d_filename<<"': "<<ae.reason<<endl;
bbold.d_status=msg.str();
bbold.d_wasRejectedLastReload=true;
safePutBBDomainInfo(bbold);
catch(std::exception &ae) {
ostringstream msg;
msg<<" error at "+nowTime()+" parsing '"<<bbold.d_name<<"' from file '"<<bbold.d_filename<<"': "<<ae.what();
- L<<Logger::Warning<<" error parsing '"<<bbold.d_name<<"' from file '"<<bbold.d_filename<<"': "<<ae.what()<<endl;
+ g_log<<Logger::Warning<<" error parsing '"<<bbold.d_name<<"' from file '"<<bbold.d_filename<<"': "<<ae.what()<<endl;
bbold.d_status=msg.str();
bbold.d_wasRejectedLastReload=true;
safePutBBDomainInfo(bbold);
static bool mustlog=::arg().mustDo("query-logging");
if(mustlog)
- L<<Logger::Warning<<"Lookup for '"<<qtype.getName()<<"' of '"<<domain<<"' within zoneID "<<zoneId<<endl;
+ g_log<<Logger::Warning<<"Lookup for '"<<qtype.getName()<<"' of '"<<domain<<"' within zoneID "<<zoneId<<endl;
bool found=false;
BB2DomainInfo bbd;
if(!found) {
if(mustlog)
- L<<Logger::Warning<<"Found no authoritative zone for "<<qname<<endl;
+ g_log<<Logger::Warning<<"Found no authoritative zone for "<<qname<<endl;
d_handle.d_list=false;
return;
}
if(mustlog)
- L<<Logger::Warning<<"Found a zone '"<<domain<<"' (with id " << bbd.d_id<<") that might contain data "<<endl;
+ g_log<<Logger::Warning<<"Found a zone '"<<domain<<"' (with id " << bbd.d_id<<") that might contain data "<<endl;
d_handle.id=bbd.d_id;
- DLOG(L<<"Bind2Backend constructing handle for search for "<<qtype.getName()<<" for "<<
+ DLOG(g_log<<"Bind2Backend constructing handle for search for "<<qtype.getName()<<" for "<<
qname<<endl);
if(domain.empty())
}
if(!bbd.current()) {
- L<<Logger::Warning<<"Zone '"<<bbd.d_name<<"' ("<<bbd.d_filename<<") needs reloading"<<endl;
+ g_log<<Logger::Warning<<"Zone '"<<bbd.d_name<<"' ("<<bbd.d_filename<<") needs reloading"<<endl;
queueReloadAndStore(bbd.d_id);
if (!safeGetBBDomainInfo(domain, &bbd))
throw DBException("Zone '"+bbd.d_name.toLogString()+"' ("+bbd.d_filename+") gone after reload"); // if we don't throw here, we crash for some reason
d_handle.d_records = bbd.d_records.get();
if(d_handle.d_records->empty())
- DLOG(L<<"Query with no results"<<endl);
+ DLOG(g_log<<"Query with no results"<<endl);
d_handle.mustlog = mustlog;
{
if(!d_handle.d_records) {
if(d_handle.mustlog)
- L<<Logger::Warning<<"There were no answers"<<endl;
+ g_log<<Logger::Warning<<"There were no answers"<<endl;
return false;
}
if(!d_handle.get(r)) {
if(d_handle.mustlog)
- L<<Logger::Warning<<"End of answers"<<endl;
+ g_log<<Logger::Warning<<"End of answers"<<endl;
d_handle.reset();
return false;
}
if(d_handle.mustlog)
- L<<Logger::Warning<<"Returning: '"<<r.qtype.getName()<<"' of '"<<r.qname<<"', content: '"<<r.content<<"'"<<endl;
+ g_log<<Logger::Warning<<"Returning: '"<<r.qtype.getName()<<"' of '"<<r.qname<<"', content: '"<<r.content<<"'"<<endl;
return true;
}
//#define DLOG(x) x
bool Bind2Backend::handle::get_normal(DNSResourceRecord &r)
{
- DLOG(L << "Bind2Backend get() was called for "<<qtype.getName() << " record for '"<<
+ DLOG(g_log << "Bind2Backend get() was called for "<<qtype.getName() << " record for '"<<
qname<<"' - "<<d_records->size()<<" available in total!"<<endl);
if(d_iter==d_end_iter) {
}
while(d_iter!=d_end_iter && !(qtype.getCode()==QType::ANY || (d_iter)->qtype==qtype.getCode())) {
- DLOG(L<<Logger::Warning<<"Skipped "<<qname<<"/"<<QType(d_iter->qtype).getName()<<": '"<<d_iter->content<<"'"<<endl);
+ DLOG(g_log<<Logger::Warning<<"Skipped "<<qname<<"/"<<QType(d_iter->qtype).getName()<<": '"<<d_iter->content<<"'"<<endl);
d_iter++;
}
if(d_iter==d_end_iter) {
return false;
}
- DLOG(L << "Bind2Backend get() returning a rr with a "<<QType(d_iter->qtype).getCode()<<endl);
+ DLOG(g_log << "Bind2Backend get() returning a rr with a "<<QType(d_iter->qtype).getCode()<<endl);
r.qname=qname.empty() ? domain : (qname+domain);
r.domain_id=id;
return false;
d_handle.reset();
- DLOG(L<<"Bind2Backend constructing handle for list of "<<id<<endl);
+ DLOG(g_log<<"Bind2Backend constructing handle for list of "<<id<<endl);
d_handle.d_records=bbd.d_records.get(); // give it a copy, which will stay around
d_handle.d_qname_iter= d_handle.d_records->begin();
ifstream c_if(getArg("supermasters").c_str(), std::ios::in); // this was nocreate?
if (!c_if) {
- L << Logger::Error << "Unable to open supermasters file for read: " << stringerror() << endl;
+ g_log << Logger::Error << "Unable to open supermasters file for read: " << stringerror() << endl;
return false;
}
{
string filename = getArg("supermaster-destdir")+'/'+domain.toStringNoDot();
- L << Logger::Warning << d_logprefix
+ g_log << Logger::Warning << d_logprefix
<< " Writing bind config zone statement for superslave zone '" << domain
<< "' from supermaster " << ip << endl;
ofstream c_of(getArg("supermaster-config").c_str(), std::ios::app);
if (!c_of) {
- L << Logger::Error << "Unable to open supermaster configfile for append: " << stringerror() << endl;
+ g_log << Logger::Error << "Unable to open supermaster configfile for append: " << stringerror() << endl;
throw DBException("Unable to open supermaster configfile for append: "+stringerror());
}
SimpleMatch sm(pattern,true);
static bool mustlog=::arg().mustDo("query-logging");
if(mustlog)
- L<<Logger::Warning<<"Search for pattern '"<<pattern<<"'"<<endl;
+ g_log<<Logger::Warning<<"Search for pattern '"<<pattern<<"'"<<endl;
{
ReadLock rl(&s_state_lock);
Bind2Loader()
{
BackendMakers().report(new Bind2Factory);
- L << Logger::Info << "[bind2backend] This is the bind backend version " << VERSION
+ g_log << Logger::Info << "[bind2backend] This is the bind backend version " << VERSION
#ifndef REPRODUCIBLE
<< " (" __DATE__ " " __TIME__ ")"
#endif
if (ns3p->d_iterations > maxNSEC3Iterations) {
ns3p->d_iterations = maxNSEC3Iterations;
- L<<Logger::Error<<"Number of NSEC3 iterations for zone '"<<name<<"' is above 'max-nsec3-iterations'. Value adjusted to: "<<maxNSEC3Iterations<<endl;
+ g_log<<Logger::Error<<"Number of NSEC3 iterations for zone '"<<name<<"' is above 'max-nsec3-iterations'. Value adjusted to: "<<maxNSEC3Iterations<<endl;
}
if (ns3p->d_algorithm != 1) {
- L<<Logger::Error<<"Invalid hash algorithm for NSEC3: '"<<std::to_string(ns3p->d_algorithm)<<"', setting to 1 for zone '"<<name<<"'."<<endl;
+ g_log<<Logger::Error<<"Invalid hash algorithm for NSEC3: '"<<std::to_string(ns3p->d_algorithm)<<"', setting to 1 for zone '"<<name<<"'."<<endl;
ns3p->d_algorithm = 1;
}
}
}
if (s_geoip_files.empty())
- L<<Logger::Warning<<"No GeoIP database files loaded!"<<endl;
+ g_log<<Logger::Warning<<"No GeoIP database files loaded!"<<endl;
config = YAML::LoadFile(getArg("zones-file"));
} else if (attr == "weight") {
rr.weight = iter->second.as<int>();
if (rr.weight < 0) {
- L<<Logger::Error<<"Weight cannot be negative for " << rr.qname << endl;
+ g_log<<Logger::Error<<"Weight cannot be negative for " << rr.qname << endl;
throw PDNSException(string("Weight cannot be negative for ") + rr.qname.toLogString());
}
rr.has_weight = true;
} else if (attr == "ttl") {
rr.ttl = iter->second.as<int>();
} else {
- L<<Logger::Error<<"Unsupported record attribute " << attr << " for " << rr.qname << endl;
+ g_log<<Logger::Error<<"Unsupported record attribute " << attr << " for " << rr.qname << endl;
throw PDNSException(string("Unsupported record attribute ") + attr + string(" for ") + rr.qname.toLogString());
}
}
}
if (!d_result.empty()) {
- L<<Logger::Error<<
+ g_log<<Logger::Error<<
"Cannot have static record and CNAME at the same time." <<
"Please fix your configuration for \"" << qdomain << "\", so that " <<
"it can be resolved by GeoIP backend directly."<< std::endl;
try {
initialize();
} catch (PDNSException &pex) {
- L<<Logger::Error<<"GeoIP backend reload failed: " << pex.reason << endl;
+ g_log<<Logger::Error<<"GeoIP backend reload failed: " << pex.reason << endl;
} catch (std::exception &stex) {
- L<<Logger::Error<<"GeoIP backend reload failed: " << stex.what() << endl;
+ g_log<<Logger::Error<<"GeoIP backend reload failed: " << stex.what() << endl;
} catch (...) {
- L<<Logger::Error<<"GeoIP backend reload failed" << endl;
+ g_log<<Logger::Error<<"GeoIP backend reload failed" << endl;
}
}
public:
GeoIPLoader() {
BackendMakers().report(new GeoIPFactory);
- L << Logger::Info << "[geoipbackend] This is the geoip backend version " VERSION
+ g_log << Logger::Info << "[geoipbackend] This is the geoip backend version " VERSION
#ifndef REPRODUCIBLE
<< " (" __DATE__ " " __TIME__ ")"
#endif
if ((ec = MMDB_open(fname.c_str(), flags, &d_s)) < 0)
throw PDNSException(string("Cannot open ") + fname + string(": ") + string(MMDB_strerror(ec)));
d_lang = language;
- L<<Logger::Debug<<"Opened MMDB database "<<fname<<"(type: "<<d_s.metadata.database_type<<
+ g_log<<Logger::Debug<<"Opened MMDB database "<<fname<<"(type: "<<d_s.metadata.database_type<<
" version: "<<d_s.metadata.binary_format_major_version << "." <<
d_s.metadata.binary_format_minor_version << ")" << endl;
}
res = MMDB_lookup_string(&d_s, ip.c_str(), &gai_ec, &mmdb_ec);
if (gai_ec != 0)
- L<<Logger::Warning<<"MMDB_lookup_string("<<ip<<") failed: "<<gai_strerror(gai_ec)<<endl;
+ g_log<<Logger::Warning<<"MMDB_lookup_string("<<ip<<") failed: "<<gai_strerror(gai_ec)<<endl;
else if (mmdb_ec != MMDB_SUCCESS)
- L<<Logger::Warning<<"MMDB_lookup_string("<<ip<<") failed: "<<MMDB_strerror(mmdb_ec)<<endl;
+ g_log<<Logger::Warning<<"MMDB_lookup_string("<<ip<<") failed: "<<MMDB_strerror(mmdb_ec)<<endl;
else if (res.found_entry) {
gl.netmask = res.netmask;
/* If it's an IPv6 database, IPv4 netmasks are reduced from 128, so we need to deduct
}
catch(SSqlException &e) {
- L<<Logger::Error<<mode<<" Connection failed: "<<e.txtReason()<<endl;
+ g_log<<Logger::Error<<mode<<" Connection failed: "<<e.txtReason()<<endl;
throw PDNSException("Unable to launch "+mode+" connection: "+e.txtReason());
}
- L<<Logger::Info<<mode<<" Connection successful. Connected to database '"<<getArg("dbname")<<"' on '"<<(getArg("host").empty() ? getArg("socket") : getArg("host"))<<"'."<<endl;
+ g_log<<Logger::Info<<mode<<" Connection successful. Connected to database '"<<getArg("dbname")<<"' on '"<<(getArg("host").empty() ? getArg("socket") : getArg("host"))<<"'."<<endl;
}
void gMySQLBackend::reconnect()
gMySQLLoader()
{
BackendMakers().report(new gMySQLFactory("gmysql"));
- L << Logger::Info << "[gmysqlbackend] This is the gmysql backend version " VERSION
+ g_log << Logger::Info << "[gmysqlbackend] This is the gmysql backend version " VERSION
#ifndef REPRODUCIBLE
<< " (" __DATE__ " " __TIME__ ")"
#endif
if (!d_stmt) return this;
if (d_dolog) {
- L<<Logger::Warning<<"Query: " << d_query <<endl;
+ g_log<<Logger::Warning<<"Query: " << d_query <<endl;
}
if ((err = mysql_stmt_bind_param(d_stmt, d_req_bind))) {
for(int i=0;i<d_fnum;i++) {
if (err == MYSQL_DATA_TRUNCATED && *d_res_bind[i].error) {
- L<<Logger::Warning<<"Result field at row " << d_residx << " column " << i << " has been truncated, we allocated " << d_res_bind[i].buffer_length << " bytes but at least " << *d_res_bind[i].length << " was needed" << endl;
+ g_log<<Logger::Warning<<"Result field at row " << d_residx << " column " << i << " has been truncated, we allocated " << d_res_bind[i].buffer_length << " bytes but at least " << *d_res_bind[i].length << " was needed" << endl;
}
if (*d_res_bind[i].is_null) {
row.push_back("");
void SMySQL::execute(const string& query)
{
if(s_dolog)
- L<<Logger::Warning<<"Query: "<<query<<endl;
+ g_log<<Logger::Warning<<"Query: "<<query<<endl;
int err;
if((err=mysql_query(&d_db,query.c_str())))
}
catch( SSqlException & e )
{
- L<<Logger::Error<< mode << " Connection failed: " << e.txtReason() << std::endl;
+ g_log<<Logger::Error<< mode << " Connection failed: " << e.txtReason() << std::endl;
throw PDNSException( "Unable to launch " + mode + " connection: " + e.txtReason());
}
- L << Logger::Warning << mode << " Connection successful" << std::endl;
+ g_log << Logger::Warning << mode << " Connection successful" << std::endl;
}
gODBCLoader()
{
BackendMakers().report( new gODBCFactory("godbc"));
- L<<Logger::Warning << "This is module godbcbackend reporting" << std::endl;
+ g_log<<Logger::Warning << "This is module godbcbackend reporting" << std::endl;
}
};
SQLRETURN result;
// cerr<<"execute("<<d_query<<")"<<endl;
if (d_dolog) {
- L<<Logger::Warning<<"Query: "<<d_query<<endl;
+ g_log<<Logger::Warning<<"Query: "<<d_query<<endl;
}
result = SQLExecute(d_statement);
}
catch (SSqlException &e) {
- L<<Logger::Error << mode << " Connection failed: " << e.txtReason() << endl;
+ g_log<<Logger::Error << mode << " Connection failed: " << e.txtReason() << endl;
throw PDNSException("Unable to launch " + mode + " connection: " + e.txtReason());
}
- L<<Logger::Info << mode << " Connection successful" << endl;
+ g_log<<Logger::Info << mode << " Connection successful" << endl;
}
class gOracleFactory : public BackendFactory
//! This reports us to the main UeberBackend class
gOracleLoader() {
BackendMakers().report(new gOracleFactory("goracle"));
- L << Logger::Info << "[goraclebackend] This is the goracle backend version " VERSION
+ g_log << Logger::Info << "[goraclebackend] This is the goracle backend version " VERSION
#ifndef REPRODUCIBLE
<< " (" __DATE__ " " __TIME__ ")"
#endif
prepareStatement();
if (d_dolog)
- L<<Logger::Warning<<"Query: "<<d_query<<endl;
+ g_log<<Logger::Warning<<"Query: "<<d_query<<endl;
ub2 fntype;
ub4 iters;
if (d_serviceContextHandle != NULL) {
err=OCILogoff(d_serviceContextHandle, d_errorHandle);
if (err) {
- L<<Logger::Warning<<"Problems logging out: "+getOracleError()<<endl;
+ g_log<<Logger::Warning<<"Problems logging out: "+getOracleError()<<endl;
}
}
}
catch(SSqlException &e) {
- L<<Logger::Error<<mode<<" Connection failed: "<<e.txtReason()<<endl;
+ g_log<<Logger::Error<<mode<<" Connection failed: "<<e.txtReason()<<endl;
throw PDNSException("Unable to launch "+mode+" connection: "+e.txtReason());
}
- L<<Logger::Info<<mode<<" Connection successful. Connected to database '"<<getArg("dbname")<<"' on '"<<getArg("host")<<"'."<<endl;
+ g_log<<Logger::Info<<mode<<" Connection successful. Connected to database '"<<getArg("dbname")<<"' on '"<<getArg("host")<<"'."<<endl;
}
void gPgSQLBackend::reconnect()
gPgSQLLoader()
{
BackendMakers().report(new gPgSQLFactory("gpgsql"));
- L << Logger::Info << "[gpgsqlbackend] This is the gpgsql backend version " VERSION
+ g_log << Logger::Info << "[gpgsqlbackend] This is the gpgsql backend version " VERSION
#ifndef REPRODUCIBLE
<< " (" __DATE__ " " __TIME__ ")"
#endif
SSqlStatement* execute() {
prepareStatement();
if (d_dolog) {
- L<<Logger::Warning<<"Query: "<<d_query<<endl;
+ g_log<<Logger::Warning<<"Query: "<<d_query<<endl;
}
d_res_set = PQexecPrepared(d_db(), d_stmt.c_str(), d_nparams, paramValues, paramLengths, NULL, 0);
ExecStatusType status = PQresultStatus(d_res_set);
#endif
// execute FETCH
if (d_dolog)
- L<<Logger::Warning<<"Query: "<<cmd<<endl;
+ g_log<<Logger::Warning<<"Query: "<<cmd<<endl;
d_res = PQexec(d_db(),cmd.c_str());
d_resnum = PQntuples(d_res);
d_fnum = PQnfields(d_res);
}
catch( SSqlException & e )
{
- L << Logger::Error << mode << ": connection failed: " << e.txtReason() << std::endl;
+ g_log << Logger::Error << mode << ": connection failed: " << e.txtReason() << std::endl;
throw PDNSException( "Unable to launch " + mode + " connection: " + e.txtReason());
}
- L << Logger::Info << mode << ": connection to '"<<getArg("database")<<"' successful" << std::endl;
+ g_log << Logger::Info << mode << ": connection to '"<<getArg("database")<<"' successful" << std::endl;
}
gSQLite3Loader()
{
BackendMakers().report( new gSQLite3Factory( "gsqlite3" ));
- L << Logger::Info << "[gsqlite3] This is the gsqlite3 backend version " VERSION
+ g_log << Logger::Info << "[gsqlite3] This is the gsqlite3 backend version " VERSION
#ifndef REPRODUCIBLE
<< " (" __DATE__ " " __TIME__ ")"
#endif
}
else if ( code == -2 ) {
// Here it may be possible to retry after obtaining a fresh ticket
- L<<Logger::Debug << logPrefix << "No TGT found, trying to acquire a new one" << std::endl;
+ g_log<<Logger::Debug << logPrefix << "No TGT found, trying to acquire a new one" << std::endl;
code = updateTgt();
if ( attemptAuth( conn ) != 0 ) {
- L<<Logger::Error << logPrefix << "Failed to acquire a TGT" << std::endl;
+ g_log<<Logger::Error << logPrefix << "Failed to acquire a TGT" << std::endl;
return false;
}
}
int rc = ldap_sasl_interactive_bind_s( conn, "", defaults.mech.c_str(),
NULL, NULL, LDAP_SASL_QUIET,
ldapGssapiAuthenticatorSaslInteractCallback, &defaults );
- L<<Logger::Debug << logPrefix << "ldap_sasl_interactive_bind_s returned " << rc << std::endl;
+ g_log<<Logger::Debug << logPrefix << "ldap_sasl_interactive_bind_s returned " << rc << std::endl;
if ( rc == LDAP_LOCAL_ERROR ) {
// This may mean that the ticket has expired, so let the caller know
krb5_get_init_creds_opt *options;
if ( ( code = krb5_init_context( &context ) ) != 0 ) {
- L<<Logger::Error << logPrefix << "Failed to init krb5 context" << std::endl;
+ g_log<<Logger::Error << logPrefix << "Failed to init krb5 context" << std::endl;
return code;
}
}
if ( code != 0 ) {
- L<<Logger::Error << logPrefix << "krb5 error when locating the keytab file: " << std::string( krb5_get_error_message( context, code ) ) << std::endl;
+ g_log<<Logger::Error << logPrefix << "krb5 error when locating the keytab file: " << std::string( krb5_get_error_message( context, code ) ) << std::endl;
return code;
}
// Extract the principal name from the keytab
krb5_kt_cursor cursor;
if ( ( code = krb5_kt_start_seq_get( context, keytab, &cursor ) ) != 0 ) {
- L<<Logger::Error << logPrefix << "krb5 error when initiating keytab search: " << std::string( krb5_get_error_message( context, code ) ) << std::endl;
+ g_log<<Logger::Error << logPrefix << "krb5 error when initiating keytab search: " << std::string( krb5_get_error_message( context, code ) ) << std::endl;
krb5_kt_close( context, keytab );
return code;
}
krb5_kt_end_seq_get( context, keytab, &cursor );
if ( code != 0 ) {
- L<<Logger::Error << logPrefix << "krb5 error when extracting principal information: " << std::string( krb5_get_error_message( context, code ) ) << std::endl;
+ g_log<<Logger::Error << logPrefix << "krb5 error when extracting principal information: " << std::string( krb5_get_error_message( context, code ) ) << std::endl;
krb5_kt_close( context, keytab );
krb5_free_principal( context, principal );
return code;
}
if ( code != 0 ) {
- L<<Logger::Error << logPrefix << "krb5 error when locating the credentials cache file: " << std::string( krb5_get_error_message( context, code ) ) << std::endl;
+ g_log<<Logger::Error << logPrefix << "krb5 error when locating the credentials cache file: " << std::string( krb5_get_error_message( context, code ) ) << std::endl;
krb5_kt_close( context, keytab );
krb5_free_principal( context, principal );
return code;
// Initialize the credentials cache file
if ( ( code = krb5_cc_initialize( context, ccache, principal ) ) != 0 ) {
- L<<Logger::Error << logPrefix << "krb5 error when initializing the credentials cache file: " << std::string( krb5_get_error_message( context, code ) ) << std::endl;
+ g_log<<Logger::Error << logPrefix << "krb5 error when initializing the credentials cache file: " << std::string( krb5_get_error_message( context, code ) ) << std::endl;
krb5_kt_close( context, keytab );
krb5_free_principal( context, principal );
return code;
}
if ( ( code = krb5_get_init_creds_opt_alloc( context, &options ) ) != 0 ) {
- L<<Logger::Error << logPrefix << "krb5 error when allocating credentials cache structure: " << std::string( krb5_get_error_message( context, code ) ) << std::endl;
+ g_log<<Logger::Error << logPrefix << "krb5 error when allocating credentials cache structure: " << std::string( krb5_get_error_message( context, code ) ) << std::endl;
krb5_kt_close( context, keytab );
krb5_free_principal( context, principal );
return code;
krb5_free_principal( context, principal );
if ( code != 0 ) {
- L<<Logger::Error << logPrefix << "krb5 error when getting the TGT: " << std::string( krb5_get_error_message( context, code ) ) << std::endl;
+ g_log<<Logger::Error << logPrefix << "krb5 error when getting the TGT: " << std::string( krb5_get_error_message( context, code ) ) << std::endl;
code = krb5_cc_store_cred( context, ccache, &credentials );
krb5_free_cred_contents( context, &credentials );
krb5_cc_close( context, ccache );
hoststr += " " + hosts[ ( idx + i ) % hosts.size() ];
}
- L << Logger::Info << m_myname << " LDAP servers = " << hoststr << endl;
+ g_log << Logger::Info << m_myname << " LDAP servers = " << hoststr << endl;
m_pldap = new PowerLDAP( hoststr.c_str(), LDAP_PORT, mustDo( "starttls" ), getArgAsNum( "timeout" ) );
m_pldap->setOption( LDAP_OPT_DEREF, LDAP_DEREF_ALWAYS );
}
m_pldap->bind( m_authenticator );
- L << Logger::Notice << m_myname << " Ldap connection succeeded" << endl;
+ g_log << Logger::Notice << m_myname << " Ldap connection succeeded" << endl;
return;
}
catch( LDAPTimeout &lt )
{
- L << Logger::Error << m_myname << " Ldap connection to server failed because of timeout" << endl;
+ g_log << Logger::Error << m_myname << " Ldap connection to server failed because of timeout" << endl;
}
catch( LDAPException &le )
{
- L << Logger::Error << m_myname << " Ldap connection to server failed: " << le.what() << endl;
+ g_log << Logger::Error << m_myname << " Ldap connection to server failed: " << le.what() << endl;
}
catch( std::exception &e )
{
- L << Logger::Error << m_myname << " Caught STL exception: " << e.what() << endl;
+ g_log << Logger::Error << m_myname << " Caught STL exception: " << e.what() << endl;
}
if( m_pldap != NULL ) { delete( m_pldap ); }
{
delete( m_pldap );
delete( m_authenticator );
- L << Logger::Notice << m_myname << " Ldap connection closed" << endl;
+ g_log << Logger::Notice << m_myname << " Ldap connection closed" << endl;
}
int attempts = m_reconnect_attempts;
bool connected = false;
while ( !connected && attempts > 0 ) {
- L << Logger::Debug << m_myname << " Reconnection attempts left: " << attempts << endl;
+ g_log << Logger::Debug << m_myname << " Reconnection attempts left: " << attempts << endl;
connected = m_pldap->connect();
if ( !connected )
Utility::usleep( 250 );
}
catch( LDAPTimeout &lt )
{
- L << Logger::Warning << m_myname << " Unable to get zone " << target << " from LDAP directory: " << lt.what() << endl;
+ g_log << Logger::Warning << m_myname << " Unable to get zone " << target << " from LDAP directory: " << lt.what() << endl;
throw( DBException( "LDAP server timeout" ) );
}
catch( LDAPNoConnection &lnc )
{
- L << Logger::Warning << m_myname << " Connection to LDAP lost, trying to reconnect" << endl;
+ g_log << Logger::Warning << m_myname << " Connection to LDAP lost, trying to reconnect" << endl;
if ( reconnect() )
this->list( target, domain_id );
else
}
catch( LDAPException &le )
{
- L << Logger::Error << m_myname << " Unable to get zone " << target << " from LDAP directory: " << le.what() << endl;
+ g_log << Logger::Error << m_myname << " Unable to get zone " << target << " from LDAP directory: " << le.what() << endl;
throw( PDNSException( "LDAP server unreachable" ) ); // try to reconnect to another server
}
catch( std::exception &e )
{
- L << Logger::Error << m_myname << " Caught STL exception for target " << target << ": " << e.what() << endl;
+ g_log << Logger::Error << m_myname << " Caught STL exception for target " << target << ": " << e.what() << endl;
throw( DBException( "STL exception" ) );
}
prepare();
filter = strbind( ":target:", "associatedDomain=*." + qesc, getArg( "filter-axfr" ) );
- DLOG( L << Logger::Debug << m_myname << " Search = basedn: " << dn << ", filter: " << filter << endl );
+ DLOG( g_log << Logger::Debug << m_myname << " Search = basedn: " << dn << ", filter: " << filter << endl );
m_msgid = m_pldap->search( dn, LDAP_SCOPE_SUBTREE, filter, (const char**) ldap_attrany );
return true;
{
if( target.isPartOf(DNSName("in-addr.arpa")) || target.isPartOf(DNSName("ip6.arpa")) )
{
- L << Logger::Warning << m_myname << " Request for reverse zone AXFR, but this is not supported in strict mode" << endl;
+ g_log << Logger::Warning << m_myname << " Request for reverse zone AXFR, but this is not supported in strict mode" << endl;
return false; // AXFR isn't supported in strict mode. Use simple mode and additional PTR records
}
m_adomain = m_adomains.end(); // skip loops in get() first time
m_qtype = qtype;
- if( m_qlog ) { L.log( "Query: '" + qname.toStringRootDot() + "|" + qtype.getName() + "'", Logger::Error ); }
+ if( m_qlog ) { g_log.log( "Query: '" + qname.toStringRootDot() + "|" + qtype.getName() + "'", Logger::Error ); }
(this->*m_lookup_fcnt)( qtype, qname, dnspkt, zoneid );
}
catch( LDAPTimeout &lt )
{
- L << Logger::Warning << m_myname << " Unable to search LDAP directory: " << lt.what() << endl;
+ g_log << Logger::Warning << m_myname << " Unable to search LDAP directory: " << lt.what() << endl;
throw( DBException( "LDAP server timeout" ) );
}
catch( LDAPNoConnection &lnc )
{
- L << Logger::Warning << m_myname << " Connection to LDAP lost, trying to reconnect" << endl;
+ g_log << Logger::Warning << m_myname << " Connection to LDAP lost, trying to reconnect" << endl;
if ( reconnect() )
this->lookup( qtype, qname, dnspkt, zoneid );
else
}
catch( LDAPException &le )
{
- L << Logger::Error << m_myname << " Unable to search LDAP directory: " << le.what() << endl;
+ g_log << Logger::Error << m_myname << " Unable to search LDAP directory: " << le.what() << endl;
throw( PDNSException( "LDAP server unreachable" ) ); // try to reconnect to another server
}
catch( std::exception &e )
{
- L << Logger::Error << m_myname << " Caught STL exception for qname " << qname << ": " << e.what() << endl;
+ g_log << Logger::Error << m_myname << " Caught STL exception for qname " << qname << ": " << e.what() << endl;
throw( DBException( "STL exception" ) );
}
}
filter = strbind( ":target:", filter, getArg( "filter-lookup" ) );
- DLOG( L << Logger::Debug << m_myname << " Search = basedn: " << getArg( "basedn" ) << ", filter: " << filter << ", qtype: " << qtype.getName() << endl );
+ DLOG( g_log << Logger::Debug << m_myname << " Search = basedn: " << getArg( "basedn" ) << ", filter: " << filter << ", qtype: " << qtype.getName() << endl );
m_msgid = m_pldap->search( getArg( "basedn" ), LDAP_SCOPE_SUBTREE, filter, attributes );
}
filter = strbind( ":target:", filter, getArg( "filter-lookup" ) );
- DLOG( L << Logger::Debug << m_myname << " Search = basedn: " << getArg( "basedn" ) << ", filter: " << filter << ", qtype: " << qtype.getName() << endl );
+ DLOG( g_log << Logger::Debug << m_myname << " Search = basedn: " << getArg( "basedn" ) << ", filter: " << filter << ", qtype: " << qtype.getName() << endl );
m_msgid = m_pldap->search( getArg( "basedn" ), LDAP_SCOPE_SUBTREE, filter, attributes );
}
dn = "dc=" + *i + "," + dn;
}
- DLOG( L << Logger::Debug << m_myname << " Search = basedn: " << dn + getArg( "basedn" ) << ", filter: " << filter << ", qtype: " << qtype.getName() << endl );
+ DLOG( g_log << Logger::Debug << m_myname << " Search = basedn: " << dn + getArg( "basedn" ) << ", filter: " << filter << ", qtype: " << qtype.getName() << endl );
m_msgid = m_pldap->search( dn + getArg( "basedn" ), LDAP_SCOPE_BASE, filter, attributes );
}
m_ttl = (uint32_t) strtol( m_result["dNSTTL"][0].c_str(), &endptr, 10 );
if( *endptr != '\0' )
{
- L << Logger::Warning << m_myname << " Invalid time to live for " << m_qname << ": " << m_result["dNSTTL"][0] << endl;
+ g_log << Logger::Warning << m_myname << " Invalid time to live for " << m_qname << ": " << m_result["dNSTTL"][0] << endl;
m_ttl = m_default_ttl;
}
m_result.erase( "dNSTTL" );
{
if( ( m_last_modified = str2tstamp( m_result["modifyTimestamp"][0] ) ) == 0 )
{
- L << Logger::Warning << m_myname << " Invalid modifyTimestamp for " << m_qname << ": " << m_result["modifyTimestamp"][0] << endl;
+ g_log << Logger::Warning << m_myname << " Invalid modifyTimestamp for " << m_qname << ": " << m_result["modifyTimestamp"][0] << endl;
}
m_result.erase( "modifyTimestamp" );
}
rr.content = *m_value;
m_value++;
- DLOG( L << Logger::Debug << m_myname << " Record = qname: " << rr.qname << ", qtype: " << (rr.qtype).getName() << ", ttl: " << rr.ttl << ", content: " << rr.content << endl );
+ DLOG( g_log << Logger::Debug << m_myname << " Record = qname: " << rr.qname << ", qtype: " << (rr.qtype).getName() << ", ttl: " << rr.ttl << ", content: " << rr.content << endl );
return true;
}
}
catch( LDAPTimeout &lt )
{
- L << Logger::Warning << m_myname << " Search failed: " << lt.what() << endl;
+ g_log << Logger::Warning << m_myname << " Search failed: " << lt.what() << endl;
throw( DBException( "LDAP server timeout" ) );
}
catch( LDAPException &le )
{
- L << Logger::Error << m_myname << " Search failed: " << le.what() << endl;
+ g_log << Logger::Error << m_myname << " Search failed: " << le.what() << endl;
throw( PDNSException( "LDAP server unreachable" ) ); // try to reconnect to another server
}
catch( std::exception &e )
{
- L << Logger::Error << m_myname << " Caught STL exception for " << m_qname << ": " << e.what() << endl;
+ g_log << Logger::Error << m_myname << " Caught STL exception for " << m_qname << ": " << e.what() << endl;
throw( DBException( "STL exception" ) );
}
}
catch( LDAPTimeout &lt )
{
- L << Logger::Warning << m_myname << " Unable to search LDAP directory: " << lt.what() << endl;
+ g_log << Logger::Warning << m_myname << " Unable to search LDAP directory: " << lt.what() << endl;
throw( DBException( "LDAP server timeout" ) );
}
catch( LDAPNoConnection &lnc )
{
- L << Logger::Warning << m_myname << " Connection to LDAP lost, trying to reconnect" << endl;
+ g_log << Logger::Warning << m_myname << " Connection to LDAP lost, trying to reconnect" << endl;
if ( reconnect() )
this->getUpdatedMasters( domains );
else
}
catch( LDAPException &le )
{
- L << Logger::Error << m_myname << " Unable to search LDAP directory: " << le.what() << endl;
+ g_log << Logger::Error << m_myname << " Unable to search LDAP directory: " << le.what() << endl;
throw( PDNSException( "LDAP server unreachable" ) ); // try to reconnect to another server
}
catch( std::exception &e )
}
catch( LDAPTimeout &lt )
{
- L << Logger::Warning << m_myname << " Unable to search LDAP directory: " << lt.what() << endl;
+ g_log << Logger::Warning << m_myname << " Unable to search LDAP directory: " << lt.what() << endl;
throw( DBException( "LDAP server timeout" ) );
}
catch( LDAPNoConnection &lnc )
{
- L << Logger::Warning << m_myname << " Connection to LDAP lost, trying to reconnect" << endl;
+ g_log << Logger::Warning << m_myname << " Connection to LDAP lost, trying to reconnect" << endl;
if ( reconnect() )
this->setNotified( id, serial );
else
}
catch( LDAPException &le )
{
- L << Logger::Error << m_myname << " Unable to search LDAP directory: " << le.what() << endl;
+ g_log << Logger::Error << m_myname << " Unable to search LDAP directory: " << le.what() << endl;
throw( PDNSException( "LDAP server unreachable" ) ); // try to reconnect to another server
}
catch( std::exception &e )
}
catch( LDAPNoConnection &lnc )
{
- L << Logger::Warning << m_myname << " Connection to LDAP lost, trying to reconnect" << endl;
+ g_log << Logger::Warning << m_myname << " Connection to LDAP lost, trying to reconnect" << endl;
if ( reconnect() )
this->setNotified( id, serial );
else
}
catch( LDAPException &le )
{
- L << Logger::Error << m_myname << " Unable to search LDAP directory: " << le.what() << endl;
+ g_log << Logger::Error << m_myname << " Unable to search LDAP directory: " << le.what() << endl;
throw( PDNSException( "LDAP server unreachable" ) ); // try to reconnect to another server
}
catch( std::exception &e )
}
catch( LDAPTimeout &lt )
{
- L << Logger::Warning << m_myname << " Unable to search LDAP directory: " << lt.what() << endl;
+ g_log << Logger::Warning << m_myname << " Unable to search LDAP directory: " << lt.what() << endl;
throw( DBException( "LDAP server timeout" ) );
}
catch( LDAPNoConnection &lnc )
{
- L << Logger::Warning << m_myname << " Connection to LDAP lost, trying to reconnect" << endl;
+ g_log << Logger::Warning << m_myname << " Connection to LDAP lost, trying to reconnect" << endl;
if ( reconnect() )
this->getDomainInfo( domain, di );
else
}
catch( LDAPException &le )
{
- L << Logger::Error << m_myname << " Unable to search LDAP directory: " << le.what() << endl;
+ g_log << Logger::Error << m_myname << " Unable to search LDAP directory: " << le.what() << endl;
throw( PDNSException( "LDAP server unreachable" ) ); // try to reconnect to another server
}
catch( std::exception &e )
LdapLoader()
{
BackendMakers().report( &factory );
- L << Logger::Info << "[ldapbackend] This is the ldap backend version " VERSION
+ g_log << Logger::Info << "[ldapbackend] This is the ldap backend version " VERSION
#ifndef REPRODUCIBLE
<< " (" __DATE__ " " __TIME__ ")"
#endif
~Lua2BackendAPIv2();
- #define logCall(func, var) { if (d_debug_log) { L<<Logger::Debug<<"["<<getPrefix()<<"] Calling "<<func<<"("<<var<<")"<< endl; } }
- #define logResult(var) { if (d_debug_log) { L<<Logger::Debug<<"["<<getPrefix()<<"] Got result " << "'" << var << "'" << endl; } }
+ #define logCall(func, var) { if (d_debug_log) { g_log<<Logger::Debug<<"["<<getPrefix()<<"] Calling "<<func<<"("<<var<<")"<< endl; } }
+ #define logResult(var) { if (d_debug_log) { g_log<<Logger::Debug<<"["<<getPrefix()<<"] Got result " << "'" << var << "'" << endl; } }
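The two helper macros above gate per-call debug output on d_debug_log and write through g_log at Debug level. No logCall call site appears in this excerpt (a logResult call does, further below), so the lines that follow are a hypothetical usage sketch only, with identifiers borrowed from the surrounding lookup code:

// Hypothetical call sites (illustration only; identifiers assumed from context).
// Each macro expands to a block whose body runs only when d_debug_log is true,
// streaming the call and its result through g_log at Debug level.
logCall("lookup", "qname=" << qname.toString() << ",qtype=" << qtype.getName());
logResult(rec.qname << " IN " << rec.qtype.getName() << " " << rec.ttl << " " << rec.getZoneRepresentation());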
virtual void postPrepareContext() override {
AuthLua4::postPrepareContext();
throw PDNSException("dns_dnssec is true but dns_get_before_and_after_names_absolute is missing");
/* domain keys is not strictly speaking necessary for dnssec backend */
if (f_get_domain_keys == nullptr)
- L<<Logger::Warning<<"dns_get_domain_keys missing - cannot do live signing"<<endl;
+ g_log<<Logger::Warning<<"dns_get_domain_keys missing - cannot do live signing"<<endl;
}
}
else if (item.first == "scopeMask")
rec.scopeMask = boost::get<int>(item.second);
else
- L<<Logger::Warning<<"Unsupported key '"<<item.first<<"' in lookup or list result"<<endl;
+ g_log<<Logger::Warning<<"Unsupported key '"<<item.first<<"' in lookup or list result"<<endl;
}
logResult(rec.qname<<" IN "<<rec.qtype.getName()<<" "<<rec.ttl<<" "<<rec.getZoneRepresentation());
d_result.push_back(rec);
}
if (d_result.empty() && d_debug_log)
- L<<Logger::Debug<<"["<<getPrefix()<<"] Got empty result"<<endl;
+ g_log<<Logger::Debug<<"["<<getPrefix()<<"] Got empty result"<<endl;
}
bool list(const DNSName &target, int domain_id, bool include_disabled=false) override {
if (f_list == nullptr) {
- L<<Logger::Error<<"["<<getPrefix()<<"] dns_list missing - cannot do AXFR"<<endl;
+ g_log<<Logger::Error<<"["<<getPrefix()<<"] dns_list missing - cannot do AXFR"<<endl;
return false;
}
else if (item.first == "kind")
di.kind = DomainInfo::stringToKind(boost::get<string>(item.second));
else
- L<<Logger::Warning<<"Unsupported key '"<<item.first<<"' in domaininfo result"<<endl;
+ g_log<<Logger::Warning<<"Unsupported key '"<<item.first<<"' in domaininfo result"<<endl;
}
di.backend = this;
logResult("zone="<<di.zone<<",serial="<<di.serial<<",kind="<<di.getKindString());
else if (item.first == "active")
key.active = boost::get<bool>(item.second);
else
- L<<Logger::Warning<<"["<<getPrefix()<<"] Unsupported key '"<<item.first<<"' in keydata result"<<endl;
+ g_log<<Logger::Warning<<"["<<getPrefix()<<"] Unsupported key '"<<item.first<<"' in keydata result"<<endl;
}
logResult("id="<<key.id<<",flags="<<key.flags<<",active="<<(key.active ? "true" : "false"));
keys.push_back(key);
before_and_after_names_result_t row = boost::get<before_and_after_names_result_t>(result);
if (row.size() != 3) {
- L<<Logger::Error<<"Invalid result from dns_get_before_and_after_names_absolute, expected array with 3 items, got "<<row.size()<<"item(s)"<<endl;
+ g_log<<Logger::Error<<"Invalid result from dns_get_before_and_after_names_absolute, expected array with 3 items, got "<<row.size()<<"item(s)"<<endl;
return false;
}
for(const auto& item: row) {
else if (item.first == "after")
after = value;
else {
- L<<Logger::Error<<"Invalid result from dns_get_before_and_after_names_absolute, unexpected key "<<item.first<<endl;
+ g_log<<Logger::Error<<"Invalid result from dns_get_before_and_after_names_absolute, unexpected key "<<item.first<<endl;
return false;
}
}
{
BackendMakers().report(new Lua2Factory);
- L << Logger::Info << "[lua2backend] This is the lua2 backend version " VERSION
+ g_log << Logger::Info << "[lua2backend] This is the lua2 backend version " VERSION
#ifndef REPRODUCIBLE
<< " (" __DATE__ " " __TIME__ ")"
#endif
if(f_lua_updatednssecorderandauth == 0) {
if(logging)
- L << Logger::Info << backend_name << "(updateDNSSECOrderAndAuth) domain_id: '" << domain_id << "' zonename: '" << zonename << "' qname: '" << qname << "' auth: '" << auth << "'" << endl;
+ g_log << Logger::Info << backend_name << "(updateDNSSECOrderAndAuth) domain_id: '" << domain_id << "' zonename: '" << zonename << "' qname: '" << qname << "' auth: '" << auth << "'" << endl;
string ins=qname.makeRelative(zonename).makeLowerCase().labelReverse().toString(" ", false);
return this->updateDNSSECOrderAndAuthAbsolute(domain_id, qname, ins, auth);
}
if(logging)
- L << Logger::Info << backend_name << "(updateDNSSECOrderAndAuth) BEGIN domain_id: '" << domain_id << "' zonename: '" << zonename << "' qname: '" << qname << "' auth: '" << auth << "'" << endl;
+ g_log << Logger::Info << backend_name << "(updateDNSSECOrderAndAuth) BEGIN domain_id: '" << domain_id << "' zonename: '" << zonename << "' qname: '" << qname << "' auth: '" << auth << "'" << endl;
lua_rawgeti(lua, LUA_REGISTRYINDEX, f_lua_updatednssecorderandauth);
lua_pop(lua, 1);
if(logging)
- L << Logger::Info << backend_name << "(updateDNSSECOrderAndAuth) END" << endl;
+ g_log << Logger::Info << backend_name << "(updateDNSSECOrderAndAuth) END" << endl;
return ok;
}
return false;
if(logging)
- L << Logger::Info << backend_name << "(updateDNSSECOrderAndAuthAbsolute) BEGIN domain_id: '" << domain_id << "' qname: '" << qname << "' ordername: '" << ordername << "' auth: '" << auth << "'" << endl;
+ g_log << Logger::Info << backend_name << "(updateDNSSECOrderAndAuthAbsolute) BEGIN domain_id: '" << domain_id << "' qname: '" << qname << "' ordername: '" << ordername << "' auth: '" << auth << "'" << endl;
lua_rawgeti(lua, LUA_REGISTRYINDEX, f_lua_updatednssecorderandauthabsolute);
lua_pop(lua, 1);
if(logging)
- L << Logger::Info << backend_name << "(updateDNSSECOrderAndAuthAbsolute) END" << endl;
+ g_log << Logger::Info << backend_name << "(updateDNSSECOrderAndAuthAbsolute) END" << endl;
return ok;
}
after.clear();
if(logging)
- L << Logger::Info << backend_name << "(getBeforeAndAfterNamesAbsolute) BEGIN id: '" << id << "' qname: '" << qname << "'" << endl;
+ g_log << Logger::Info << backend_name << "(getBeforeAndAfterNamesAbsolute) BEGIN id: '" << id << "' qname: '" << qname << "'" << endl;
lua_rawgeti(lua, LUA_REGISTRYINDEX, f_lua_updatednssecorderandauthabsolute);
if (!ok) {
if(logging)
- L << Logger::Info << backend_name << "(getBeforeAndAfterNamesAbsolute) ERROR!" << endl;
+ g_log << Logger::Info << backend_name << "(getBeforeAndAfterNamesAbsolute) ERROR!" << endl;
return false;
}
lua_pop(lua, 1);
if(logging)
- L << Logger::Info << backend_name << "(getBeforeAndAfterNamesAbsolute) END unhashed: '" << unhashed << "' before: '" << before << "' after: '" << after << "' " << endl;
+ g_log << Logger::Info << backend_name << "(getBeforeAndAfterNamesAbsolute) END unhashed: '" << unhashed << "' before: '" << before << "' after: '" << after << "' " << endl;
return ok;
}
return false;
if(logging)
- L << Logger::Info << backend_name << "(updateDomainKey) BEGIN name: '" << name << "' id: '" << id << "' toowhat: '" << toowhat << "'" << endl;
+ g_log << Logger::Info << backend_name << "(updateDomainKey) BEGIN name: '" << name << "' id: '" << id << "' toowhat: '" << toowhat << "'" << endl;
lua_rawgeti(lua, LUA_REGISTRYINDEX, f_lua_updatedomainkey);
lua_pop(lua, 1);
if(logging)
- L << Logger::Info << backend_name << "(updateDomainKey) END" << endl;
+ g_log << Logger::Info << backend_name << "(updateDomainKey) END" << endl;
return ok;
}
return updateDomainKey(name, id, true);
if(logging)
- L << Logger::Info << backend_name << "(activateDomainKey) BEGIN name: '" << name << "' id: '" << id << endl;
+ g_log << Logger::Info << backend_name << "(activateDomainKey) BEGIN name: '" << name << "' id: '" << id << endl;
lua_rawgeti(lua, LUA_REGISTRYINDEX, f_lua_activatedomainkey);
lua_pop(lua, 1);
if(logging)
- L << Logger::Info << backend_name << "(activateDomainKey) END" << endl;
+ g_log << Logger::Info << backend_name << "(activateDomainKey) END" << endl;
return ok;
}
return updateDomainKey(name, id, false);
if(logging)
- L << Logger::Info << backend_name << "(deactivateDomainKey) BEGIN name: '" << name << "' id: '" << id << endl;
+ g_log << Logger::Info << backend_name << "(deactivateDomainKey) BEGIN name: '" << name << "' id: '" << id << endl;
lua_rawgeti(lua, LUA_REGISTRYINDEX, f_lua_deactivatedomainkey);
lua_pop(lua, 1);
if(logging)
- L << Logger::Info << backend_name << "(deactivateDomainKey) END" << endl;
+ g_log << Logger::Info << backend_name << "(deactivateDomainKey) END" << endl;
return ok;
}
return false;
if(logging)
- L << Logger::Info << backend_name << "(removeDomainKey) BEGIN name: '" << name << "' id: '" << id << endl;
+ g_log << Logger::Info << backend_name << "(removeDomainKey) BEGIN name: '" << name << "' id: '" << id << endl;
lua_rawgeti(lua, LUA_REGISTRYINDEX, f_lua_removedomainkey);
lua_pop(lua, 1);
if(logging)
- L << Logger::Info << backend_name << "(removeDomainKey) END" << endl;
+ g_log << Logger::Info << backend_name << "(removeDomainKey) END" << endl;
return ok;
}
return false;
if(logging)
- //L << Logger::Info << backend_name << "(addDomainKey) BEGIN name: '" << name << "' id: '" << id << endl;
+ //g_log << Logger::Info << backend_name << "(addDomainKey) BEGIN name: '" << name << "' id: '" << id << endl;
cerr << backend_name << "(addDomainKey) BEGIN name: '" << name << endl;
lua_rawgeti(lua, LUA_REGISTRYINDEX, f_lua_adddomainkey);
return false;
if(logging)
- L << Logger::Info << backend_name << "(getDomainKeys) BEGIN name: '" << name << endl;
+ g_log << Logger::Info << backend_name << "(getDomainKeys) BEGIN name: '" << name << endl;
lua_rawgeti(lua, LUA_REGISTRYINDEX, f_lua_getdomainkeys);
if (returnedwhat != LUA_TTABLE) {
lua_pop(lua, 1);
if(logging)
- L << Logger::Info << backend_name << "(getDomainKeys) ERROR!" << endl;
+ g_log << Logger::Info << backend_name << "(getDomainKeys) ERROR!" << endl;
return false;
}
}
if(logging)
- L << Logger::Info << backend_name << "(getDomainKeys) END" << endl;
+ g_log << Logger::Info << backend_name << "(getDomainKeys) END" << endl;
return j > 0;
}
return false;
if(logging)
- L << Logger::Info << backend_name << "(getTSIGKey) BEGIN name: '" << name << "'" << endl;
+ g_log << Logger::Info << backend_name << "(getTSIGKey) BEGIN name: '" << name << "'" << endl;
lua_rawgeti(lua, LUA_REGISTRYINDEX, f_lua_gettsigkey);
if ( (lua_type(lua, -1) != LUA_TSTRING) && (lua_type(lua, -2) != LUA_TSTRING) ) {
lua_pop(lua, 2);
if(logging)
- L << Logger::Info << backend_name << "(getTSIGKey) ERROR" << endl;
+ g_log << Logger::Info << backend_name << "(getTSIGKey) ERROR" << endl;
return false;
}
*content = c;
if(logging)
- L << Logger::Info << backend_name << "(getTSIGKey) END" << endl;
+ g_log << Logger::Info << backend_name << "(getTSIGKey) END" << endl;
return true;
}
return false;
if(logging)
- L << Logger::Info << backend_name << "(setDomainMetadata) BEGIN name: '" << name << "' kind: '" << kind << "'" << endl;
+ g_log << Logger::Info << backend_name << "(setDomainMetadata) BEGIN name: '" << name << "' kind: '" << kind << "'" << endl;
lua_rawgeti(lua, LUA_REGISTRYINDEX, f_lua_setdomainmetadata);
lua_pop(lua, 1);
if(logging)
- L << Logger::Info << backend_name << "(setDomainMetadata) END" << endl;
+ g_log << Logger::Info << backend_name << "(setDomainMetadata) END" << endl;
return ok;
return false;
if(logging)
- L << Logger::Info << backend_name << "(getDomainMetadata) BEGIN name: '" << name << "' kind: '" << kind << "'" << endl;
+ g_log << Logger::Info << backend_name << "(getDomainMetadata) BEGIN name: '" << name << "' kind: '" << kind << "'" << endl;
lua_rawgeti(lua, LUA_REGISTRYINDEX, f_lua_getdomainmetadata);
}
if(logging)
- L << Logger::Info << backend_name << "(getDomainMetadata) END" << endl;
+ g_log << Logger::Info << backend_name << "(getDomainMetadata) END" << endl;
return j > 0;
return;
if(logging)
- L << Logger::Info << backend_name << "(alsonotifies) BEGIN domain: '" << domain << "'" << endl;
+ g_log << Logger::Info << backend_name << "(alsonotifies) BEGIN domain: '" << domain << "'" << endl;
lua_rawgeti(lua, LUA_REGISTRYINDEX, f_lua_alsonotifies);
}
if(logging)
- L << Logger::Info << backend_name << "(alsoNotifies) END" << endl;
+ g_log << Logger::Info << backend_name << "(alsoNotifies) END" << endl;
return;
space = " ";
}
- L.log(lb->backend_name + s.str(), (Logger::Urgency) log_level);
+ g_log.log(lb->backend_name + s.str(), (Logger::Urgency) log_level);
return 0;
}
{
BackendMakers().report(new LUAFactory);
- L << Logger::Info << "[luabackend] This is the lua backend version " VERSION
+ g_log << Logger::Info << "[luabackend] This is the lua backend version " VERSION
#ifndef REPRODUCIBLE
<< " (" __DATE__ " " __TIME__ ")"
#endif
#include <string>
using std::string;
-//#undef L
return;
if (logging)
- L << Logger::Info << backend_name << "(getUpdatedMasters) BEGIN" << endl;
+ g_log << Logger::Info << backend_name << "(getUpdatedMasters) BEGIN" << endl;
lua_rawgeti(lua, LUA_REGISTRYINDEX, f_lua_getupdatedmasters);
domains_from_table(domains, "getUpdatedMasters");
if (logging)
- L << Logger::Info << backend_name << "(getUpdatedMasters) END" << endl;
+ g_log << Logger::Info << backend_name << "(getUpdatedMasters) END" << endl;
}
void LUABackend::setNotified(uint32_t id, uint32_t serial) {
return;
if (logging)
- L << Logger::Info << backend_name << "(setNotified) BEGIN" << endl;
+ g_log << Logger::Info << backend_name << "(setNotified) BEGIN" << endl;
lua_rawgeti(lua, LUA_REGISTRYINDEX, f_lua_setnotified);
}
if (logging)
- L << Logger::Info << backend_name << "(setNotified) END" << endl;
+ g_log << Logger::Info << backend_name << "(setNotified) END" << endl;
}
}
catch(LUAException &e) {
- L<<Logger::Error<<backend_name<<"Error: "<<e.what<<endl;
+ g_log<<Logger::Error<<backend_name<<"Error: "<<e.what<<endl;
throw PDNSException(e.what);
}
LUABackend::~LUABackend() {
try {
- L<<Logger::Info<<backend_name<<"Closing..." << endl;
+ g_log<<Logger::Info<<backend_name<<"Closing..." << endl;
}
catch (...) {
}
bool LUABackend::list(const DNSName &target, int domain_id, bool include_disabled) {
if (logging)
- L << Logger::Info << backend_name << "(list) BEGIN" << endl;
+ g_log << Logger::Info << backend_name << "(list) BEGIN" << endl;
lua_rawgeti(lua, LUA_REGISTRYINDEX, f_lua_list);
lua_pop(lua, 1);
if (logging)
- L << Logger::Info << backend_name << "(list) END" << endl;
+ g_log << Logger::Info << backend_name << "(list) END" << endl;
return ok;
}
void LUABackend::lookup(const QType &qtype, const DNSName &qname, DNSPacket *p, int domain_id) {
if (logging)
- L << Logger::Info << backend_name << "(lookup) BEGIN" << endl;
+ g_log << Logger::Info << backend_name << "(lookup) BEGIN" << endl;
dnspacket = p;
dnspacket = NULL;
if (logging)
- L << Logger::Info << backend_name << "(lookup) END" << endl;
+ g_log << Logger::Info << backend_name << "(lookup) END" << endl;
}
bool LUABackend::get(DNSResourceRecord &rr) {
if (logging)
- L << Logger::Info << backend_name << "(get) BEGIN" << endl;
+ g_log << Logger::Info << backend_name << "(get) BEGIN" << endl;
lua_rawgeti(lua, LUA_REGISTRYINDEX, f_lua_get);
lua_pop(lua, 1 );
if (logging)
- L << Logger::Info << backend_name << "(get) END" << endl;
+ g_log << Logger::Info << backend_name << "(get) END" << endl;
return !rr.content.empty();
}
bool LUABackend::getSOA(const DNSName &name, SOAData &soadata, bool unmodifiedSerial) {
if (logging)
- L << Logger::Info << backend_name << "(getsoa) BEGIN" << endl;
+ g_log << Logger::Info << backend_name << "(getsoa) BEGIN" << endl;
lua_rawgeti(lua, LUA_REGISTRYINDEX, f_lua_getsoa);
if (!getValueFromTable(lua, "nameserver", soadata.nameserver)) {
soadata.nameserver = DNSName(arg()["default-soa-name"]);
if (soadata.nameserver.empty()) {
- L<<Logger::Error << backend_name << "(getSOA)" << " Error: SOA Record is missing nameserver for the domain '" << name << "'" << endl;
+ g_log<<Logger::Error << backend_name << "(getSOA)" << " Error: SOA Record is missing nameserver for the domain '" << name << "'" << endl;
lua_pop(lua, 1 );
return false;
}
lua_pop(lua, 1 );
if (logging)
- L << Logger::Info << backend_name << "(getsoa) END" << endl;
+ g_log << Logger::Info << backend_name << "(getsoa) END" << endl;
return true;
}
return;
if (logging)
- L << Logger::Info << backend_name << "(rediscover) BEGIN" << endl;
+ g_log << Logger::Info << backend_name << "(rediscover) BEGIN" << endl;
lua_rawgeti(lua, LUA_REGISTRYINDEX, f_lua_rediscover);
*status = s;
if (logging)
- L << Logger::Info << backend_name << "(rediscover) END" << endl;
+ g_log << Logger::Info << backend_name << "(rediscover) END" << endl;
return;
}
return false;
if (logging)
- L << Logger::Info << backend_name << "(startTransaction) BEGIN" << endl;
+ g_log << Logger::Info << backend_name << "(startTransaction) BEGIN" << endl;
lua_rawgeti(lua, LUA_REGISTRYINDEX, f_lua_starttransaction);
lua_pop(lua, 1);
if (logging)
- L << Logger::Info << backend_name << "(startTransaction) END" << endl;
+ g_log << Logger::Info << backend_name << "(startTransaction) END" << endl;
return ok;
}
return false;
if (logging)
- L << Logger::Info << backend_name << "(commitTransaction) BEGIN" << endl;
+ g_log << Logger::Info << backend_name << "(commitTransaction) BEGIN" << endl;
lua_rawgeti(lua, LUA_REGISTRYINDEX, f_lua_committransaction);
lua_pop(lua, 1);
if (logging)
- L << Logger::Info << backend_name << "(commitTransaction) END" << endl;
+ g_log << Logger::Info << backend_name << "(commitTransaction) END" << endl;
return ok;
}
return false;
if (logging)
- L << Logger::Info << backend_name << "(abortTransaction) BEGIN" << endl;
+ g_log << Logger::Info << backend_name << "(abortTransaction) BEGIN" << endl;
lua_rawgeti(lua, LUA_REGISTRYINDEX, f_lua_aborttransaction);
lua_pop(lua, 1);
if (logging)
- L << Logger::Info << backend_name << "(abortTransaction) END" << endl;
+ g_log << Logger::Info << backend_name << "(abortTransaction) END" << endl;
return ok;
}
return false;
if (logging)
- L << Logger::Info << backend_name << "(feedRecord) BEGIN" << endl;
+ g_log << Logger::Info << backend_name << "(feedRecord) BEGIN" << endl;
lua_rawgeti(lua, LUA_REGISTRYINDEX, f_lua_feedrecord);
dnsrr_to_table(lua, &rr);
lua_pop(lua, 1);
if (logging)
- L << Logger::Info << backend_name << "(feedRecord) END" << endl;
+ g_log << Logger::Info << backend_name << "(feedRecord) END" << endl;
return ok;
}
return;
if (logging)
- L << Logger::Info << backend_name << "(setFresh) BEGIN" << endl;
+ g_log << Logger::Info << backend_name << "(setFresh) BEGIN" << endl;
lua_rawgeti(lua, LUA_REGISTRYINDEX, f_lua_setfresh);
}
if (logging)
- L << Logger::Info << backend_name << "(setFresh) END" << endl;
+ g_log << Logger::Info << backend_name << "(setFresh) END" << endl;
}
return;
if (logging)
- L << Logger::Info << backend_name << "(getUnfreshSlaveInfos) BEGIN" << endl;
+ g_log << Logger::Info << backend_name << "(getUnfreshSlaveInfos) BEGIN" << endl;
lua_rawgeti(lua, LUA_REGISTRYINDEX, f_lua_getunfreshslaveinfos);
domains_from_table(domains, "getUnfreshSlaveInfos");
if (logging)
- L << Logger::Info << backend_name << "(getUnfreshSlaveInfos) END" << endl;
+ g_log << Logger::Info << backend_name << "(getUnfreshSlaveInfos) END" << endl;
}
return false;
if (logging)
- L << Logger::Error << backend_name << "(isMaster) BEGIN" << endl;
+ g_log << Logger::Error << backend_name << "(isMaster) BEGIN" << endl;
lua_rawgeti(lua, LUA_REGISTRYINDEX, f_lua_ismaster);
lua_pop(lua, 1);
if (logging)
- L << Logger::Info << backend_name << "(isMaster) END" << endl;
+ g_log << Logger::Info << backend_name << "(isMaster) END" << endl;
return ok;
}
return false;
if (logging)
- L << Logger::Info << backend_name << "(getDomainInfo) BEGIN" << endl;
+ g_log << Logger::Info << backend_name << "(getDomainInfo) BEGIN" << endl;
lua_rawgeti(lua, LUA_REGISTRYINDEX, f_lua_getdomaininfo);
}
if (logging)
- L << Logger::Info << backend_name << "(getDomainInfo) END" << endl;
+ g_log << Logger::Info << backend_name << "(getDomainInfo) END" << endl;
return domaininfo_from_table(&di);
}
return false;
if (logging)
- L << Logger::Info << backend_name << "(superMasterBackend) BEGIN" << endl;
+ g_log << Logger::Info << backend_name << "(superMasterBackend) BEGIN" << endl;
lua_rawgeti(lua, LUA_REGISTRYINDEX, f_lua_supermasterbackend);
}
if (logging)
- L << Logger::Info << backend_name << "(superMasterBackend) END" << endl;
+ g_log << Logger::Info << backend_name << "(superMasterBackend) END" << endl;
return ok;
}
return false;
if (logging)
- L << Logger::Info << backend_name << "(createSlaveDomain) BEGIN" << endl;
+ g_log << Logger::Info << backend_name << "(createSlaveDomain) BEGIN" << endl;
lua_rawgeti(lua, LUA_REGISTRYINDEX, f_lua_createslavedomain);
lua_pop(lua, 1);
if (logging)
- L << Logger::Info << backend_name << "(createSlaveDomain) END" << endl;
+ g_log << Logger::Info << backend_name << "(createSlaveDomain) END" << endl;
return ok;
}
d_db->setLog(::arg().mustDo("query-logging"));
}
catch(SSqlException &e) {
- L<<Logger::Error<<backendName<<" Connection failed: "<<e.txtReason()<<endl;
+ g_log<<Logger::Error<<backendName<<" Connection failed: "<<e.txtReason()<<endl;
throw PDNSException(backendName+"Unable to launch connection: "+e.txtReason());
}
d_useminimalttl=mustDo("use-minimal-ttl");
d_minimum=0;
- L<<Logger::Warning<<backendName<<" Connection successful"<<endl;
+ g_log<<Logger::Warning<<backendName<<" Connection successful"<<endl;
try {
d_basicQuery_stmt = d_db->prepare(basicQuery, 4);
d_anyQuery_stmt = d_db->prepare(anyQuery, 5);
} catch (SSqlException &e) {
- L<<Logger::Error<<"Cannot prepare statements: " << e.txtReason() <<endl;
+ g_log<<Logger::Error<<"Cannot prepare statements: " << e.txtReason() <<endl;
throw PDNSException("Cannot prepare statements: " + e.txtReason());
}
// keeps static analyzers happy
d_minimum = pdns_stou(d_result[0][1]);
if (d_result.size()>1) {
- L<<Logger::Warning<<backendName<<" Found more than one matching origin for zone ID: "<<zoneId<<endl;
+ g_log<<Logger::Warning<<backendName<<" Found more than one matching origin for zone ID: "<<zoneId<<endl;
};
try {
soadata.db = this;
if (d_result.size()>1) {
- L<<Logger::Warning<<backendName<<" Found more than one matching zone for: "<<name<<endl;
+ g_log<<Logger::Warning<<backendName<<" Found more than one matching zone for: "<<name<<endl;
};
return true;
return;
}
- DLOG(L<<Logger::Debug<<"MyDNSBackend::lookup(" << qtype.getName() << "," << qname << ",p," << zoneId << ")" << endl);
+ DLOG(g_log<<Logger::Debug<<"MyDNSBackend::lookup(" << qtype.getName() << "," << qname << ",p," << zoneId << ")" << endl);
if (zoneId < 0) {
// First off we need to work out what zone we're working with
if (found) {
while (d_result.size()>1) {
- L<<Logger::Warning<<backendName<<" Found more than one matching zone for: "+d_origin<<endl;
+ g_log<<Logger::Warning<<backendName<<" Found more than one matching zone for: "+d_origin<<endl;
};
// We found the zoneId, so we can work out how to find our rr
string host;
try {
if (qtype.getCode()==QType::ANY) {
- DLOG(L<<Logger::Debug<<"Running d_anyQuery_stmt with " << zoneId << ", " << host << ", " << sdom << ", " << zoneId <<" , "<< qname << ", " << qtype.getName() << endl);
+ DLOG(g_log<<Logger::Debug<<"Running d_anyQuery_stmt with " << zoneId << ", " << host << ", " << sdom << ", " << zoneId <<" , "<< qname << ", " << qtype.getName() << endl);
d_query_stmt = &d_anyQuery_stmt;
(*d_query_stmt)->
bind("domain_id", zoneId)->
bind("qname2", sdom.toString())->
execute();
} else {
- DLOG(L<<Logger::Debug<<"Running d_basicQuery_stmt with " << zoneId << ", " << host << ", " << qname << ", " << qtype.getName() << endl);
+ DLOG(g_log<<Logger::Debug<<"Running d_basicQuery_stmt with " << zoneId << ", " << host << ", " << qname << ", " << qtype.getName() << endl);
d_query_stmt = &d_basicQuery_stmt;
(*d_query_stmt)->
bind("domain_id", zoneId)->
public:
MyDNSLoader() {
BackendMakers().report(new MyDNSFactory());
- L << Logger::Info << "[mydnsbackend] This is the mydns backend version " VERSION
+ g_log << Logger::Info << "[mydnsbackend] This is the mydns backend version " VERSION
#ifndef REPRODUCIBLE
<< " (" __DATE__ " " __TIME__ ")"
#endif
if( getArg( "host" ).size() > 0 )
{
- L.log( m_myname + " WARNING: Using deprecated opendbx-host parameter", Logger::Warning );
+ g_log.log( m_myname + " WARNING: Using deprecated opendbx-host parameter", Logger::Warning );
stringtok( m_hosts[READ], getArg( "host" ), ", " );
m_hosts[WRITE] = m_hosts[READ];
}
}
catch( std::exception& e )
{
- L.log( m_myname + " OdbxBackend(): Caught STL exception - " + e.what(), Logger::Error );
+ g_log.log( m_myname + " OdbxBackend(): Caught STL exception - " + e.what(), Logger::Error );
throw( PDNSException( "Fatal: STL exception" ) );
}
}
try
{
- DLOG( L.log( m_myname + " getDomainInfo()", Logger::Debug ) );
+ DLOG( g_log.log( m_myname + " getDomainInfo()", Logger::Debug ) );
string stmt = getArg( "sql-zoneinfo" );
string& stmtref = strbind( ":name", escape( domain.makeLowerCase().toStringRootDot(), READ ), stmt );
}
catch( std::exception& e )
{
- L.log( m_myname + " getDomainInfo: Caught STL std::exception - " + e.what(), Logger::Error );
+ g_log.log( m_myname + " getDomainInfo: Caught STL std::exception - " + e.what(), Logger::Error );
return false;
}
try
{
- DLOG( L.log( m_myname + " getSOA()", Logger::Debug ) );
+ DLOG( g_log.log( m_myname + " getSOA()", Logger::Debug ) );
string stmt = getArg( "sql-lookupsoa" );
string& stmtref = strbind( ":name", escape( domain.makeLowerCase().toStringRootDot(), READ ), stmt );
}
catch( std::exception& e )
{
- L.log( m_myname + " getSOA: Caught STL exception - " + e.what(), Logger::Error );
+ g_log.log( m_myname + " getSOA: Caught STL exception - " + e.what(), Logger::Error );
return false;
}
{
try
{
- DLOG( L.log( m_myname + " list()", Logger::Debug ) );
+ DLOG( g_log.log( m_myname + " list()", Logger::Debug ) );
m_qname.clear();
m_result = NULL;
if( len < 0 )
{
- L.log( m_myname + " list: Unable to convert zone id to string - format error", Logger::Error );
+ g_log.log( m_myname + " list: Unable to convert zone id to string - format error", Logger::Error );
return false;
}
if( len > static_cast<int>(sizeof( m_buffer )) - 1 )
{
- L.log( m_myname + " list: Unable to convert zone id to string - insufficient buffer space", Logger::Error );
+ g_log.log( m_myname + " list: Unable to convert zone id to string - insufficient buffer space", Logger::Error );
return false;
}
}
catch( std::exception& e )
{
- L.log( m_myname + " list: Caught STL exception - " + e.what(), Logger::Error );
+ g_log.log( m_myname + " list: Caught STL exception - " + e.what(), Logger::Error );
return false;
}
{
try
{
- DLOG( L.log( m_myname + " lookup()", Logger::Debug ) );
+ DLOG( g_log.log( m_myname + " lookup()", Logger::Debug ) );
string stmt;
string& stmtref = stmt;
if( len < 0 )
{
- L.log( m_myname + " lookup: Unable to convert zone id to string - format error", Logger::Error );
+ g_log.log( m_myname + " lookup: Unable to convert zone id to string - format error", Logger::Error );
throw( DBException( "Error: Libc error" ) );
}
if( len > static_cast<int>(sizeof( m_buffer )) - 1 )
{
- L.log( m_myname + " lookup: Unable to convert zone id to string - insufficient buffer space", Logger::Error );
+ g_log.log( m_myname + " lookup: Unable to convert zone id to string - insufficient buffer space", Logger::Error );
throw( DBException( "Error: Libc error" ) );
}
}
catch( std::exception& e )
{
- L.log( m_myname + " lookup: Caught STL exception - " + e.what(), Logger::Error );
+ g_log.log( m_myname + " lookup: Caught STL exception - " + e.what(), Logger::Error );
throw( DBException( "Error: STL exception" ) );
}
}
try
{
- DLOG( L.log( m_myname + " get()", Logger::Debug ) );
+ DLOG( g_log.log( m_myname + " get()", Logger::Debug ) );
if( getRecord( READ ) )
{
}
catch( std::exception& e )
{
- L.log( m_myname + " get: Caught STL exception - " + e.what(), Logger::Error );
+ g_log.log( m_myname + " get: Caught STL exception - " + e.what(), Logger::Error );
return false;
}
try
{
- DLOG( L.log( m_myname + " setFresh()", Logger::Debug ) );
+ DLOG( g_log.log( m_myname + " setFresh()", Logger::Debug ) );
if( !m_handle[WRITE] && !connectTo( m_hosts[WRITE], WRITE ) )
{
- L.log( m_myname + " setFresh: Master server is unreachable", Logger::Error );
+ g_log.log( m_myname + " setFresh: Master server is unreachable", Logger::Error );
throw( DBException( "Error: Server unreachable" ) );
}
if( len < 0 )
{
- L.log( m_myname + " setFresh: Unable to insert values into statement '" + getArg( "sql-update-lastcheck" ) + "' - format error", Logger::Error );
+ g_log.log( m_myname + " setFresh: Unable to insert values into statement '" + getArg( "sql-update-lastcheck" ) + "' - format error", Logger::Error );
throw( DBException( "Error: Libc error" ) );
}
if( len > static_cast<int>(sizeof( m_buffer )) - 1 )
{
- L.log( m_myname + " setFresh: Unable to insert values into statement '" + getArg( "sql-update-lastcheck" ) + "' - insufficient buffer space", Logger::Error );
+ g_log.log( m_myname + " setFresh: Unable to insert values into statement '" + getArg( "sql-update-lastcheck" ) + "' - insufficient buffer space", Logger::Error );
throw( DBException( "Error: Libc error" ) );
}
}
catch ( std::exception& e )
{
- L.log( m_myname + " setFresh: Caught STL exception - " + e.what(), Logger::Error );
+ g_log.log( m_myname + " setFresh: Caught STL exception - " + e.what(), Logger::Error );
throw( DBException( "Error: STL exception" ) );
}
}
{
try
{
- DLOG( L.log( m_myname + " setNotified()", Logger::Debug ) );
+ DLOG( g_log.log( m_myname + " setNotified()", Logger::Debug ) );
if( !m_handle[WRITE] && !connectTo( m_hosts[WRITE], WRITE ) )
{
- L.log( m_myname + " setFresh: Master server is unreachable", Logger::Error );
+ g_log.log( m_myname + " setFresh: Master server is unreachable", Logger::Error );
throw( DBException( "Error: Server unreachable" ) );
}
if( len < 0 )
{
- L.log( m_myname + " setNotified: Unable to insert values into statement '" + getArg( "sql-update-serial" ) + "' - format error", Logger::Error );
+ g_log.log( m_myname + " setNotified: Unable to insert values into statement '" + getArg( "sql-update-serial" ) + "' - format error", Logger::Error );
throw( DBException( "Error: Libc error" ) );
}
if( len > static_cast<int>(sizeof( m_buffer )) - 1 )
{
- L.log( m_myname + " setNotified: Unable to insert values into statement '" + getArg( "sql-update-serial" ) + "' - insufficient buffer space", Logger::Error );
+ g_log.log( m_myname + " setNotified: Unable to insert values into statement '" + getArg( "sql-update-serial" ) + "' - insufficient buffer space", Logger::Error );
throw( DBException( "Error: Libc error" ) );
}
}
catch ( std::exception& e )
{
- L.log( m_myname + " setNotified: Caught STL exception - " + e.what(), Logger::Error );
+ g_log.log( m_myname + " setNotified: Caught STL exception - " + e.what(), Logger::Error );
throw( DBException( "Error: STL exception" ) );
}
}
{
try
{
- DLOG( L.log( m_myname + " isMaster()", Logger::Debug ) );
+ DLOG( g_log.log( m_myname + " isMaster()", Logger::Debug ) );
string stmt = getArg( "sql-master" );
string& stmtref = strbind( ":name", escape( domain.makeLowerCase().toStringRootDot(), READ ), stmt );
}
catch ( std::exception& e )
{
- L.log( m_myname + " isMaster: Caught STL exception - " + e.what(), Logger::Error );
+ g_log.log( m_myname + " isMaster: Caught STL exception - " + e.what(), Logger::Error );
return false;
}
{
try
{
- DLOG( L.log( m_myname + " getUnfreshSlaveInfos()", Logger::Debug ) );
+ DLOG( g_log.log( m_myname + " getUnfreshSlaveInfos()", Logger::Debug ) );
if( unfresh == NULL )
{
- L.log( m_myname + " getUnfreshSlaveInfos: invalid parameter - NULL pointer", Logger::Error );
+ g_log.log( m_myname + " getUnfreshSlaveInfos: invalid parameter - NULL pointer", Logger::Error );
return;
}
}
catch ( std::exception& e )
{
- L.log( m_myname + " getUnfreshSlaveInfo: Caught STL exception - " + e.what(), Logger::Error );
+ g_log.log( m_myname + " getUnfreshSlaveInfo: Caught STL exception - " + e.what(), Logger::Error );
}
}
{
try
{
- DLOG( L.log( m_myname + " getUpdatedMasters()", Logger::Debug ) );
+ DLOG( g_log.log( m_myname + " getUpdatedMasters()", Logger::Debug ) );
if( updated == NULL )
{
- L.log( m_myname + " getUpdatedMasters: invalid parameter - NULL pointer", Logger::Error );
+ g_log.log( m_myname + " getUpdatedMasters: invalid parameter - NULL pointer", Logger::Error );
return;
}
}
catch ( std::exception& e )
{
- L.log( m_myname + " getUpdatedMasters: Caught STL exception - " + e.what(), Logger::Error );
+ g_log.log( m_myname + " getUpdatedMasters: Caught STL exception - " + e.what(), Logger::Error );
}
}
{
try
{
- DLOG( L.log( m_myname + " superMasterBackend()", Logger::Debug ) );
+ DLOG( g_log.log( m_myname + " superMasterBackend()", Logger::Debug ) );
if( account != NULL && ddb != NULL )
{
}
catch ( std::exception& e )
{
- L.log( m_myname + " superMasterBackend: Caught STL exception - " + e.what(), Logger::Error );
+ g_log.log( m_myname + " superMasterBackend: Caught STL exception - " + e.what(), Logger::Error );
return false;
}
{
try
{
- DLOG( L.log( m_myname + " createSlaveDomain()", Logger::Debug ) );
+ DLOG( g_log.log( m_myname + " createSlaveDomain()", Logger::Debug ) );
if( !m_handle[WRITE] && !connectTo( m_hosts[WRITE], WRITE ) )
{
- L.log( m_myname + " createSlaveDomain: Master server is unreachable", Logger::Error );
+ g_log.log( m_myname + " createSlaveDomain: Master server is unreachable", Logger::Error );
return false;
}
if( len < 0 )
{
- L.log( m_myname + " createSlaveDomain: Unable to insert values in statement '" + getArg( "sql-insert-slave" ) + "' - format error", Logger::Error );
+ g_log.log( m_myname + " createSlaveDomain: Unable to insert values in statement '" + getArg( "sql-insert-slave" ) + "' - format error", Logger::Error );
return false;
}
if( len > static_cast<int>(sizeof( m_buffer )) - 1 )
{
- L.log( m_myname + " createSlaveDomain: Unable to insert values in statement '" + getArg( "sql-insert-slave" ) + "' - insufficient buffer space", Logger::Error );
+ g_log.log( m_myname + " createSlaveDomain: Unable to insert values in statement '" + getArg( "sql-insert-slave" ) + "' - insufficient buffer space", Logger::Error );
return false;
}
}
catch ( std::exception& e )
{
- L.log( m_myname + " createSlaveDomain: Caught STL exception - " + e.what(), Logger::Error );
+ g_log.log( m_myname + " createSlaveDomain: Caught STL exception - " + e.what(), Logger::Error );
return false;
}
{
try
{
- DLOG( L.log( m_myname + " feedRecord()", Logger::Debug ) );
+ DLOG( g_log.log( m_myname + " feedRecord()", Logger::Debug ) );
if( !m_handle[WRITE] && !connectTo( m_hosts[WRITE], WRITE ) )
{
- L.log( m_myname + " feedRecord: Master server is unreachable", Logger::Error );
+ g_log.log( m_myname + " feedRecord: Master server is unreachable", Logger::Error );
return false;
}
if( len < 0 )
{
- L.log( m_myname + " feedRecord: Unable to insert values in statement '" + getArg( "sql-insert-record" ) + "' - format error", Logger::Error );
+ g_log.log( m_myname + " feedRecord: Unable to insert values in statement '" + getArg( "sql-insert-record" ) + "' - format error", Logger::Error );
return false;
}
if( len > static_cast<int>(sizeof( m_buffer )) - 1 )
{
- L.log( m_myname + " feedRecord: Unable to insert values in statement '" + getArg( "sql-insert-record" ) + "' - insufficient buffer space", Logger::Error );
+ g_log.log( m_myname + " feedRecord: Unable to insert values in statement '" + getArg( "sql-insert-record" ) + "' - insufficient buffer space", Logger::Error );
return false;
}
}
catch ( std::exception& e )
{
- L.log( m_myname + " feedRecord: Caught STL exception - " + e.what(), Logger::Error );
+ g_log.log( m_myname + " feedRecord: Caught STL exception - " + e.what(), Logger::Error );
return false;
}
{
try
{
- DLOG( L.log( m_myname + " startTransaction()", Logger::Debug ) );
+ DLOG( g_log.log( m_myname + " startTransaction()", Logger::Debug ) );
if( !m_handle[WRITE] && !connectTo( m_hosts[WRITE], WRITE ) )
{
- L.log( m_myname + " startTransaction: Master server is unreachable", Logger::Error );
+ g_log.log( m_myname + " startTransaction: Master server is unreachable", Logger::Error );
return false;
}
if( len < 0 )
{
- L.log( m_myname + " startTransaction: Unable to convert zone id to string - format error", Logger::Error );
+ g_log.log( m_myname + " startTransaction: Unable to convert zone id to string - format error", Logger::Error );
return false;
}
if( len > static_cast<int>(sizeof( m_buffer )) - 1 )
{
- L.log( m_myname + " startTransaction: Unable to convert zone id to string - insufficient buffer space", Logger::Error );
+ g_log.log( m_myname + " startTransaction: Unable to convert zone id to string - insufficient buffer space", Logger::Error );
return false;
}
}
catch ( std::exception& e )
{
- L.log( m_myname + " startTransaction: Caught STL exception - " + e.what(), Logger::Error );
+ g_log.log( m_myname + " startTransaction: Caught STL exception - " + e.what(), Logger::Error );
return false;
}
{
try
{
- DLOG( L.log( m_myname + " commitTransaction()", Logger::Debug ) );
+ DLOG( g_log.log( m_myname + " commitTransaction()", Logger::Debug ) );
if( !m_handle[WRITE] && !connectTo( m_hosts[WRITE], WRITE ) )
{
- L.log( m_myname + " commitTransaction: Master server is unreachable", Logger::Error );
+ g_log.log( m_myname + " commitTransaction: Master server is unreachable", Logger::Error );
return false;
}
}
catch ( std::exception& e )
{
- L.log( m_myname + " commitTransaction: Caught STL exception - " + e.what(), Logger::Error );
+ g_log.log( m_myname + " commitTransaction: Caught STL exception - " + e.what(), Logger::Error );
return false;
}
{
try
{
- DLOG( L.log( m_myname + " abortTransaction()", Logger::Debug ) );
+ DLOG( g_log.log( m_myname + " abortTransaction()", Logger::Debug ) );
if( !m_handle[WRITE] && !connectTo( m_hosts[WRITE], WRITE ) )
{
- L.log( m_myname + " abortTransaction: Master server is unreachable", Logger::Error );
+ g_log.log( m_myname + " abortTransaction: Master server is unreachable", Logger::Error );
return false;
}
}
catch ( std::exception& e )
{
- L.log( m_myname + " abortTransaction: Caught STL exception - " + e.what(), Logger::Error );
+ g_log.log( m_myname + " abortTransaction: Caught STL exception - " + e.what(), Logger::Error );
return false;
}
OdbxLoader()
{
BackendMakers().report( &factory );
- L<< Logger::Info << "[opendbxbackend] This is the opendbx backend version " VERSION
+ g_log<< Logger::Info << "[opendbxbackend] This is the opendbx backend version " VERSION
#ifndef REPRODUCIBLE
<< " (" __DATE__ " " __TIME__ ")"
#endif
if( type == WRITE && getArg( "backend" ) == "sqlite" )
{
- L.log( m_myname + " Using same SQLite connection for reading and writing to '" + hosts[odbx_host_index[READ]] + "'", Logger::Notice );
+ g_log.log( m_myname + " Using same SQLite connection for reading and writing to '" + hosts[odbx_host_index[READ]] + "'", Logger::Notice );
m_handle[WRITE] = m_handle[READ];
return true;
}
{
if( ( err = odbx_bind( m_handle[type], getArg( "database" ).c_str(), getArg( "username" ).c_str(), getArg( "password" ).c_str(), ODBX_BIND_SIMPLE ) ) == ODBX_ERR_SUCCESS )
{
- L.log( m_myname + " Database connection (" + (type ? "write" : "read") + ") to '" + hosts[h] + "' succeeded", Logger::Notice );
+ g_log.log( m_myname + " Database connection (" + (type ? "write" : "read") + ") to '" + hosts[h] + "' succeeded", Logger::Notice );
return true;
}
- L.log( m_myname + " Unable to bind to database on host " + hosts[h] + " - " + string( odbx_error( m_handle[type], err ) ), Logger::Error );
+ g_log.log( m_myname + " Unable to bind to database on host " + hosts[h] + " - " + string( odbx_error( m_handle[type], err ) ), Logger::Error );
continue;
}
- L.log( m_myname + " Unable to connect to server on host " + hosts[h] + " - " + string( odbx_error( m_handle[type], err ) ), Logger::Error );
+ g_log.log( m_myname + " Unable to connect to server on host " + hosts[h] + " - " + string( odbx_error( m_handle[type], err ) ), Logger::Error );
}
m_handle[type] = NULL;
int err;
- DLOG( L.log( m_myname + " execStmt()", Logger::Debug ) );
+ DLOG( g_log.log( m_myname + " execStmt()", Logger::Debug ) );
- if( m_qlog ) { L.log( m_myname + " Query: " + stmt, Logger::Info ); }
+ if( m_qlog ) { g_log.log( m_myname + " Query: " + stmt, Logger::Info ); }
if( ( err = odbx_query( m_handle[type], stmt, length ) ) < 0 )
{
- L.log( m_myname + " execStmt: Unable to execute query - " + string( odbx_error( m_handle[type], err ) ), Logger::Error );
+ g_log.log( m_myname + " execStmt: Unable to execute query - " + string( odbx_error( m_handle[type], err ) ), Logger::Error );
if( err != -ODBX_ERR_PARAM && odbx_error_type( m_handle[type], err ) > 0 ) { return false; } // ODBX_ERR_PARAM workaround
if( !connectTo( m_hosts[type], type ) ) { return false; }
int err = 3;
- DLOG( L.log( m_myname + " getRecord()", Logger::Debug ) );
+ DLOG( g_log.log( m_myname + " getRecord()", Logger::Debug ) );
do
{
if( err < 0 )
{
- L.log( m_myname + " getRecord: Unable to get next result - " + string( odbx_error( m_handle[type], err ) ), Logger::Error );
+ g_log.log( m_myname + " getRecord: Unable to get next result - " + string( odbx_error( m_handle[type], err ) ), Logger::Error );
throw( PDNSException( "Error: odbx_result() failed" ) );
}
{
if( ( err = odbx_row_fetch( m_result ) ) < 0 )
{
- L.log( m_myname + " getRecord: Unable to get next row - " + string( odbx_error( m_handle[type], err ) ), Logger::Error );
+ g_log.log( m_myname + " getRecord: Unable to get next row - " + string( odbx_error( m_handle[type], err ) ), Logger::Error );
throw( PDNSException( "Error: odbx_row_fetch() failed" ) );
}
}
}
- L.log( m_myname + " Values: " + fields, Logger::Error );
+ g_log.log( m_myname + " Values: " + fields, Logger::Error );
#endif
return true;
}
unsigned long len = sizeof( m_escbuf );
- DLOG( L.log( m_myname + " escape(string)", Logger::Debug ) );
+ DLOG( g_log.log( m_myname + " escape(string)", Logger::Debug ) );
if( ( err = odbx_escape( m_handle[type], str.c_str(), str.size(), m_escbuf, &len ) ) < 0 )
{
- L.log( m_myname + " escape(string): Unable to escape string - " + string( odbx_error( m_handle[type], err ) ), Logger::Error );
+ g_log.log( m_myname + " escape(string): Unable to escape string - " + string( odbx_error( m_handle[type], err ) ), Logger::Error );
if( err != -ODBX_ERR_PARAM && odbx_error_type( m_handle[type], err ) > 0 ) { throw( runtime_error( "odbx_escape() failed" ) ); } // ODBX_ERR_PARAM workaround
if( !connectTo( m_hosts[type], type ) ) { throw( runtime_error( "odbx_escape() failed" ) ); }
SOAData sd;
- DLOG( L.log( m_myname + " getDomainList()", Logger::Debug ) );
+ DLOG( g_log.log( m_myname + " getDomainList()", Logger::Debug ) );
if( !execStmt( stmt.c_str(), stmt.size(), READ ) ) { return false; }
if( !getRecord( READ ) ) { return false; }
throw OracleException("Setting session pool get mode", oraerr);
}
} catch (OracleException &theException) {
- L << Logger::Critical << "OracleFactory: "
+ g_log << Logger::Critical << "OracleFactory: "
<< theException.reason << endl;
Cleanup();
throw theException;
throw OracleException("OCISessionPoolDestroy", oraerr);
}
} catch (OracleException &theException) {
- L << Logger::Error << "Failed to destroy Oracle session pool: "
+ g_log << Logger::Error << "Failed to destroy Oracle session pool: "
<< theException.reason << endl;
}
}
OracleLoader()
{
BackendMakers().report(new OracleFactory);
- L << Logger::Info << "[oraclebackend] This is the oracle backend version " VERSION
+ g_log << Logger::Info << "[oraclebackend] This is the oracle backend version " VERSION
#ifndef REPRODUCIBLE
<< " (" __DATE__ " " __TIME__ ")"
#endif
d_cp->send("HELO\t"+std::to_string(d_abiVersion));
string banner;
d_cp->receive(banner);
- L<<Logger::Error<<"Backend launched with banner: "<<banner<<endl;
+ g_log<<Logger::Error<<"Backend launched with banner: "<<banner<<endl;
}
void CoWrapper::send(const string &line)
return;
}
catch(PDNSException &ae) {
- L<<Logger::Warning<<kBackendId<<" Unable to receive data from coprocess. "<<ae.reason<<endl;
+ g_log<<Logger::Warning<<kBackendId<<" Unable to receive data from coprocess. "<<ae.reason<<endl;
delete d_cp;
d_cp=0;
throw;
launch();
}
catch(const ArgException &A) {
- L<<Logger::Error<<kBackendId<<" Unable to launch, fatal argument error: "<<A.reason<<endl;
+ g_log<<Logger::Error<<kBackendId<<" Unable to launch, fatal argument error: "<<A.reason<<endl;
throw;
}
catch(...) {
d_disavow=false;
if(d_regex && !d_regex->match(qname.toStringRootDot())) {
if(::arg().mustDo("query-logging"))
- L<<Logger::Error<<"Query for '"<<qname<<"' failed regex '"<<d_regexstr<<"'"<<endl;
+ g_log<<Logger::Error<<"Query for '"<<qname<<"' failed regex '"<<d_regexstr<<"'"<<endl;
d_disavow=true; // don't pass to backend
} else {
ostringstream query;
query <<"\t"<<realRemote.toString();
if(::arg().mustDo("query-logging"))
- L<<Logger::Error<<"Query: '"<<query.str()<<"'"<<endl;
+ g_log<<Logger::Error<<"Query: '"<<query.str()<<"'"<<endl;
d_coproc->send(query.str());
}
}
catch(PDNSException &pe) {
- L<<Logger::Error<<kBackendId<<" Error from coprocess: "<<pe.reason<<endl;
+ g_log<<Logger::Error<<kBackendId<<" Error from coprocess: "<<pe.reason<<endl;
d_disavow = true;
}
d_qtype=qtype;
d_coproc->send(query.str());
}
catch(PDNSException &ae) {
- L<<Logger::Error<<kBackendId<<" Error from coprocess: "<<ae.reason<<endl;
+ g_log<<Logger::Error<<kBackendId<<" Error from coprocess: "<<ae.reason<<endl;
}
d_qname=DNSName(itoa(inZoneId)); // why do we store a number here??
return true;
d_coproc->send(oss.str());
}
catch(PDNSException &ae) {
- L<<Logger::Error<<kBackendId<<" Error from coprocess: "<<ae.reason<<endl;
+ g_log<<Logger::Error<<kBackendId<<" Error from coprocess: "<<ae.reason<<endl;
cleanup();
}
return new PipeBackend();
}
catch(...) {
- L<<Logger::Error<<kBackendId<<" Unable to instantiate a pipebackend!"<<endl;
+ g_log<<Logger::Error<<kBackendId<<" Unable to instantiate a pipebackend!"<<endl;
return 0;
}
}
vector<string>parts;
stringtok(parts,line,"\t");
if(parts.empty()) {
- L<<Logger::Error<<kBackendId<<" Coprocess returned empty line in query for "<<d_qname<<endl;
+ g_log<<Logger::Error<<kBackendId<<" Coprocess returned empty line in query for "<<d_qname<<endl;
throw PDNSException("Format error communicating with coprocess");
}
else if(parts[0]=="FAIL") {
return false;
}
else if(parts[0]=="LOG") {
- L<<Logger::Error<<"Coprocess: "<<line.substr(4)<<endl;
+ g_log<<Logger::Error<<"Coprocess: "<<line.substr(4)<<endl;
continue;
}
else if(parts[0]=="DATA") { // yay
if(parts.size() < 7 + extraFields) {
- L<<Logger::Error<<kBackendId<<" Coprocess returned incomplete or empty line in data section for query for "<<d_qname<<endl;
+ g_log<<Logger::Error<<kBackendId<<" Coprocess returned incomplete or empty line in data section for query for "<<d_qname<<endl;
throw PDNSException("Format error communicating with coprocess in data section");
// now what?
}
}
else {
if(parts.size()< 8 + extraFields) {
- L<<Logger::Error<<kBackendId<<" Coprocess returned incomplete MX/SRV line in data section for query for "<<d_qname<<endl;
+ g_log<<Logger::Error<<kBackendId<<" Coprocess returned incomplete MX/SRV line in data section for query for "<<d_qname<<endl;
throw PDNSException("Format error communicating with coprocess in data section of MX/SRV record");
}
}
}
catch (DBException &dbe) {
- L<<Logger::Error<<kBackendId<<" "<<dbe.reason<<endl;
+ g_log<<Logger::Error<<kBackendId<<" "<<dbe.reason<<endl;
throw;
}
catch (PDNSException &pe) {
- L<<Logger::Error<<kBackendId<<" "<<pe.reason<<endl;
+ g_log<<Logger::Error<<kBackendId<<" "<<pe.reason<<endl;
cleanup();
throw;
}
PipeLoader()
{
BackendMakers().report(new PipeFactory);
- L << Logger::Info << kBackendId <<" This is the pipe backend version " VERSION
+ g_log << Logger::Info << kBackendId <<" This is the pipe backend version " VERSION
#ifndef REPRODUCIBLE
<< " (" __DATE__ " " __TIME__ ")"
#endif
RandomLoader()
{
BackendMakers().report(new RandomFactory);
- L << Logger::Info << "[randombackend] This is the random backend version " VERSION
+ g_log << Logger::Info << "[randombackend] This is the random backend version " VERSION
#ifndef REPRODUCIBLE
<< " (" __DATE__ " " __TIME__ ")"
#endif
d_socket->writenWithTimeout(out.str().c_str(), out.str().size(), timeout);
rv = 1;
} catch (NetworkError& ne) {
- L<<Logger::Error<<"While writing to HTTP endpoint "<<d_addr.toStringWithPort()<<": "<<ne.what()<<std::endl;
+ g_log<<Logger::Error<<"While writing to HTTP endpoint "<<d_addr.toStringWithPort()<<": "<<ne.what()<<std::endl;
} catch (...) {
- L<<Logger::Error<<"While writing to HTTP endpoint "<<d_addr.toStringWithPort()<<": exception caught"<<std::endl;
+ g_log<<Logger::Error<<"While writing to HTTP endpoint "<<d_addr.toStringWithPort()<<": exception caught"<<std::endl;
}
}
}
d_socket->writenWithTimeout(out.str().c_str(), out.str().size(), timeout);
rv = 1;
} catch (NetworkError& ne) {
- L<<Logger::Error<<"While writing to HTTP endpoint "<<d_addr.toStringWithPort()<<": "<<ne.what()<<std::endl;
+ g_log<<Logger::Error<<"While writing to HTTP endpoint "<<d_addr.toStringWithPort()<<": "<<ne.what()<<std::endl;
} catch (...) {
- L<<Logger::Error<<"While writing to HTTP endpoint "<<d_addr.toStringWithPort()<<": exception caught"<<std::endl;
+ g_log<<Logger::Error<<"While writing to HTTP endpoint "<<d_addr.toStringWithPort()<<": exception caught"<<std::endl;
}
if (rv > -1) break;
}
freeaddrinfo(gAddr);
} else {
- L<<Logger::Error<<"Unable to resolve " << req.url.host << ": " << gai_strerror(ec) << std::endl;
+ g_log<<Logger::Error<<"Unable to resolve " << req.url.host << ": " << gai_strerror(ec) << std::endl;
}
}
if (arl.ready() == false)
throw NetworkError("timeout");
} catch (NetworkError &ne) {
- L<<Logger::Error<<"While reading from HTTP endpoint "<<d_addr.toStringWithPort()<<": "<<ne.what()<<std::endl;
+ g_log<<Logger::Error<<"While reading from HTTP endpoint "<<d_addr.toStringWithPort()<<": "<<ne.what()<<std::endl;
delete d_socket;
d_socket = NULL;
fail = true;
} catch (...) {
- L<<Logger::Error<<"While reading from HTTP endpoint "<<d_addr.toStringWithPort()<<": exception caught"<<std::endl;
+ g_log<<Logger::Error<<"While reading from HTTP endpoint "<<d_addr.toStringWithPort()<<": exception caught"<<std::endl;
delete d_socket;
fail = true;
}
std::string err;
output = Json::parse(resp.body, err);
if (output != nullptr) return resp.body.size();
- L<<Logger::Error<<"Cannot parse JSON reply: "<<err<<endl;
+ g_log<<Logger::Error<<"Cannot parse JSON reply: "<<err<<endl;
return rv;
}
PipeConnector::PipeConnector(std::map<std::string,std::string> optionsMap) {
if (optionsMap.count("command") == 0) {
- L<<Logger::Error<<"Cannot find 'command' option in connection string"<<endl;
+ g_log<<Logger::Error<<"Cannot find 'command' option in connection string"<<endl;
throw PDNSException();
}
this->command = optionsMap.find("command")->second;
this->send(msg);
msg = nullptr;
if (this->recv(msg)==false) {
- L<<Logger::Error<<"Failed to initialize coprocess"<<std::endl;
+ g_log<<Logger::Error<<"Failed to initialize coprocess"<<std::endl;
}
}
if (value["result"].is_bool() && boolFromJson(value, "result", false) == false)
rv = false;
for(const auto& message: value["log"].array_items())
- L<<Logger::Info<<"[remotebackend]: "<< message.string_value() <<std::endl;
+ g_log<<Logger::Info<<"[remotebackend]: "<< message.string_value() <<std::endl;
return rv;
}
return false;
try {
return connector->send(value);
} catch (PDNSException &ex) {
- L<<Logger::Error<<"Exception caught when sending: "<<ex.reason<<std::endl;
+ g_log<<Logger::Error<<"Exception caught when sending: "<<ex.reason<<std::endl;
}
delete this->connector;
try {
return connector->recv(value);
} catch (PDNSException &ex) {
- L<<Logger::Error<<"Exception caught when receiving: "<<ex.reason<<std::endl;
+ g_log<<Logger::Error<<"Exception caught when receiving: "<<ex.reason<<std::endl;
} catch (...) {
- L<<Logger::Error<<"Exception caught when receiving"<<std::endl;;
+ g_log<<Logger::Error<<"Exception caught when receiving"<<std::endl;;
}
delete this->connector;
Json answer;
if (this->send(query) == false || this->recv(answer) == false) {
- L<<Logger::Error<<kBackendId<<" Failed to execute RPC for RemoteBackend::setNotified("<<id<<","<<serial<<")"<<endl;
+ g_log<<Logger::Error<<kBackendId<<" Failed to execute RPC for RemoteBackend::setNotified("<<id<<","<<serial<<")"<<endl;
}
}
return new RemoteBackend();
}
catch(...) {
- L<<Logger::Error<<kBackendId<<" Unable to instantiate a remotebackend!"<<endl;
+ g_log<<Logger::Error<<kBackendId<<" Unable to instantiate a remotebackend!"<<endl;
return 0;
};
}
RemoteLoader::RemoteLoader() {
BackendMakers().report(new RemoteBackendFactory);
- L << Logger::Info << kBackendId << " This is the remote backend version " VERSION
+ g_log << Logger::Info << kBackendId << " This is the remote backend version " VERSION
#ifndef REPRODUCIBLE
<< " (" __DATE__ " " __TIME__ ")"
#endif
UnixsocketConnector::UnixsocketConnector(std::map<std::string,std::string> optionsMap) {
if (optionsMap.count("path") == 0) {
- L<<Logger::Error<<"Cannot find 'path' option in connection string"<<endl;
+ g_log<<Logger::Error<<"Cannot find 'path' option in connection string"<<endl;
throw PDNSException();
}
this->timeout = 2000;
UnixsocketConnector::~UnixsocketConnector() {
if (this->connected) {
try {
- L<<Logger::Info<<"closing socket connection"<<endl;
+ g_log<<Logger::Info<<"closing socket connection"<<endl;
}
catch (...) {
}
if (connected) return; // no point reconnecting if connected...
connected = true;
- L<<Logger::Info<<"Reconnecting to backend" << std::endl;
+ g_log<<Logger::Info<<"Reconnecting to backend" << std::endl;
fd = socket(AF_UNIX, SOCK_STREAM, 0);
if (fd < 0) {
connected = false;
- L<<Logger::Error<<"Cannot create socket: " << strerror(errno) << std::endl;;
+ g_log<<Logger::Error<<"Cannot create socket: " << strerror(errno) << std::endl;;
return;
}
if (makeUNsockaddr(path, &sock)) {
- L<<Logger::Error<<"Unable to create UNIX domain socket: Path '"<<path<<"' is not a valid UNIX socket path."<<std::endl;
+ g_log<<Logger::Error<<"Unable to create UNIX domain socket: Path '"<<path<<"' is not a valid UNIX socket path."<<std::endl;
return;
}
rv = connect(fd, reinterpret_cast<struct sockaddr*>(&sock), sizeof sock);
if (rv != 0 && errno != EISCONN && errno != 0) {
- L<<Logger::Error<<"Cannot connect to socket: " << strerror(errno) << std::endl;
+ g_log<<Logger::Error<<"Cannot connect to socket: " << strerror(errno) << std::endl;
close(fd);
connected = false;
return;
this->send(msg);
msg = nullptr;
if (this->recv(msg) == false) {
- L<<Logger::Warning << "Failed to initialize backend" << std::endl;
+ g_log<<Logger::Warning << "Failed to initialize backend" << std::endl;
close(fd);
this->connected = false;
}
// lookup timeout, target and stuff
if (options.count("endpoint") == 0) {
- L<<Logger::Error<<"Cannot find 'endpoint' option in connection string"<<endl;
+ g_log<<Logger::Error<<"Cannot find 'endpoint' option in connection string"<<endl;
throw PDNSException("Cannot find 'endpoint' option in connection string");
}
this->d_endpoint = options.find("endpoint")->second;
if(zmq_connect(this->d_sock, this->d_endpoint.c_str()) < 0)
{
- L<<Logger::Error<<"zmq_connect() failed"<< zmq_strerror(errno)<<std::endl;;
+ g_log<<Logger::Error<<"zmq_connect() failed"<< zmq_strerror(errno)<<std::endl;;
throw PDNSException("Cannot find 'endpoint' option in connection string");
}
this->send(msg);
msg = nullptr;
if (this->recv(msg)==false) {
- L<<Logger::Error<<"Failed to initialize zeromq"<<std::endl;
+ g_log<<Logger::Error<<"Failed to initialize zeromq"<<std::endl;
throw PDNSException("Failed to initialize zeromq");
}
};
if (zmq_poll(&item, 1, 1)>0) {
if(zmq_msg_send(&message, this->d_sock, 0) == -1) {
// message was not sent
- L<<Logger::Error<<"Cannot send to " << this->d_endpoint << ": " << zmq_strerror(errno)<<std::endl;
+ g_log<<Logger::Error<<"Cannot send to " << this->d_endpoint << ": " << zmq_strerror(errno)<<std::endl;
} else
return line.size();
}
}
} catch (std::exception &ex) {
- L<<Logger::Error<<"Cannot send to " << this->d_endpoint << ": " << ex.what()<<std::endl;
+ g_log<<Logger::Error<<"Cannot send to " << this->d_endpoint << ": " << ex.what()<<std::endl;
throw PDNSException(ex.what());
}
if (output != nullptr)
rv = msg_size;
else
- L<<Logger::Error<<"Cannot parse JSON reply from " << this->d_endpoint << ": " << err << endl;
+ g_log<<Logger::Error<<"Cannot parse JSON reply from " << this->d_endpoint << ": " << err << endl;
break;
} else if (errno == EAGAIN) { continue; // try again
} else {
}
}
} catch (std::exception &ex) {
- L<<Logger::Error<<"Cannot receive from " << this->d_endpoint << ": " << ex.what()<<std::endl;
+ g_log<<Logger::Error<<"Cannot receive from " << this->d_endpoint << ": " << ex.what()<<std::endl;
throw PDNSException(ex.what());
}
d_fd = open(cdbfile.c_str(), O_RDONLY);
if (d_fd < 0)
{
- L<<Logger::Error<<"Failed to open cdb database file '"<<cdbfile<<"'. Error: "<<stringerror()<<endl;
+ g_log<<Logger::Error<<"Failed to open cdb database file '"<<cdbfile<<"'. Error: "<<stringerror()<<endl;
throw new PDNSException("Failed to open cdb database file '"+cdbfile+"'. Error: " + stringerror());
}
int cdbinit = cdb_init(&d_cdb, d_fd);
if (cdbinit < 0)
{
- L<<Logger::Error<<"Failed to initialize cdb structure. ErrorNr: '"<<cdbinit<<endl;
+ g_log<<Logger::Error<<"Failed to initialize cdb structure. ErrorNr: '"<<cdbinit<<endl;
throw new PDNSException("Failed to initialize cdb structure.");
}
TDIById_t& domain_index = domains->get<tag_domainid>();
TDIById_t::iterator itById = domain_index.find(id);
if (itById == domain_index.end()) {
- L<<Logger::Error<<backendname<<"Received updated serial("<<serial<<"), but domain ID ("<<id<<") is not known in this backend."<<endl;
+ g_log<<Logger::Error<<backendname<<"Received updated serial("<<serial<<"), but domain ID ("<<id<<") is not known in this backend."<<endl;
} else {
- DLOG(L<<Logger::Debug<<backendname<<"Setting serial for "<<itById->zone<<" to "<<serial<<endl);
+ DLOG(g_log<<Logger::Debug<<backendname<<"Setting serial for "<<itById->zone<<" to "<<serial<<endl);
domain_index.modify(itById, TDI_SerialModifier(serial));
}
s_domainInfo[d_suffix] = *domains;
string key=simpleCompress(queryDomain);
- DLOG(L<<Logger::Debug<<backendname<<"[lookup] query for qtype ["<<qtype.getName()<<"] qdomain ["<<qdomain<<"]"<<endl);
- DLOG(L<<Logger::Debug<<"[lookup] key ["<<makeHexDump(key)<<"]"<<endl);
+ DLOG(g_log<<Logger::Debug<<backendname<<"[lookup] query for qtype ["<<qtype.getName()<<"] qdomain ["<<qdomain<<"]"<<endl);
+ DLOG(g_log<<Logger::Debug<<"[lookup] key ["<<makeHexDump(key)<<"]"<<endl);
d_isWildcardQuery = false;
if (key[0] == '\001' && key[1] == '\052') {
string val = record.second;
string key = record.first;
- //DLOG(L<<Logger::Debug<<"[GET] Key: "<<makeHexDump(key)<<endl);
- //DLOG(L<<Logger::Debug<<"[GET] Val: "<<makeHexDump(val)<<endl);
+ //DLOG(g_log<<Logger::Debug<<"[GET] Key: "<<makeHexDump(key)<<endl);
+ //DLOG(g_log<<Logger::Debug<<"[GET] Val: "<<makeHexDump(val)<<endl);
if (key[0] == '\000' && key[1] == '\045') { // skip locations
continue;
}
DLOG(cerr<<"CONTENT: "<<rr.content<<endl);
}
catch (...) {
- L<<Logger::Error<<backendname<<"Failed to parse record content for "<<rr.qname<<" with type "<<rr.qtype.getName();
+ g_log<<Logger::Error<<backendname<<"Failed to parse record content for "<<rr.qname<<" with type "<<rr.qtype.getName();
if (d_ignorebogus) {
- L<<". Ignoring!"<<endl;
+ g_log<<". Ignoring!"<<endl;
continue;
} else {
- L<<". Erroring out!"<<endl;
+ g_log<<". Erroring out!"<<endl;
throw;
}
}
-// DLOG(L<<Logger::Debug<<backendname<<"Returning ["<<rr.content<<"] for ["<<rr.qname<<"] of RecordType ["<<rr.qtype.getName()<<"]"<<endl;);
+// DLOG(g_log<<Logger::Debug<<backendname<<"Returning ["<<rr.content<<"] for ["<<rr.qname<<"] of RecordType ["<<rr.qtype.getName()<<"]"<<endl;);
return true;
}
} // end of while
- DLOG(L<<Logger::Debug<<backendname<<"No more records to return."<<endl);
+ DLOG(g_log<<Logger::Debug<<backendname<<"No more records to return."<<endl);
d_cdbReader = nullptr;
return false;
public:
TinyDNSLoader() {
BackendMakers().report(new TinyDNSFactory);
- L << Logger::Info << "[tinydnsbackend] This is the tinydns backend version " VERSION
+ g_log << Logger::Info << "[tinydnsbackend] This is the tinydns backend version " VERSION
#ifndef REPRODUCIBLE
<< " (" __DATE__ " " __TIME__ ")"
#endif
set("include-dir","Directory to include configuration files from");
if(!parseFile(fname, "", lax)) {
- L << Logger::Warning << "Unable to open " << fname << std::endl;
+ g_log << Logger::Warning << "Unable to open " << fname << std::endl;
return false;
}
gatherIncludes(extraConfigs);
for(const std::string& fn : extraConfigs) {
if (!file(fn.c_str(), lax, true)) {
- L << Logger::Error << fn << " could not be parsed" << std::endl;
+ g_log << Logger::Error << fn << " could not be parsed" << std::endl;
throw ArgException(fn + " could not be parsed");
}
}
// stat
if (stat(params["include-dir"].c_str(), &st)) {
- L << Logger::Error << params["include-dir"] << " does not exist!" << std::endl;
+ g_log << Logger::Error << params["include-dir"] << " does not exist!" << std::endl;
throw ArgException(params["include-dir"] + " does not exist!");
}
// wonder if it's accessible directory
if (!S_ISDIR(st.st_mode)) {
- L << Logger::Error << params["include-dir"] << " is not a directory" << std::endl;
+ g_log << Logger::Error << params["include-dir"] << " is not a directory" << std::endl;
throw ArgException(params["include-dir"] + " is not a directory");
}
if (!(dir = opendir(params["include-dir"].c_str()))) {
- L << Logger::Error << params["include-dir"] << " is not accessible" << std::endl;
+ g_log << Logger::Error << params["include-dir"] << " is not accessible" << std::endl;
throw ArgException(params["include-dir"] + " is not accessible");
}
namebuf << params["include-dir"].c_str() << "/" << ent->d_name; // FIXME: Use some path separator
// ensure it's readable file
if (stat(namebuf.str().c_str(), &st) || !S_ISREG(st.st_mode)) {
- L << Logger::Error << namebuf.str() << " is not a file" << std::endl;
+ g_log << Logger::Error << namebuf.str() << " is not a file" << std::endl;
closedir(dir);
throw ArgException(namebuf.str() + " does not exist!");
}
writen2WithTimeout(s.getHandle(), msg.c_str(), msg.length(), 2);
} catch (runtime_error &e){
- L<<Logger::Warning<<"Unable to write data to carbon server at "<<remote.toStringWithPort()<<": "<<e.what()<<endl;
+ g_log<<Logger::Warning<<"Unable to write data to carbon server at "<<remote.toStringWithPort()<<": "<<e.what()<<endl;
continue;
}
}
}
catch(std::exception& e)
{
- L<<Logger::Error<<"Carbon thread died: "<<e.what()<<endl;
+ g_log<<Logger::Error<<"Carbon thread died: "<<e.what()<<endl;
return 0;
}
catch(PDNSException& e)
{
- L<<Logger::Error<<"Carbon thread died, PDNSException: "<<e.reason<<endl;
+ g_log<<Logger::Error<<"Carbon thread died, PDNSException: "<<e.reason<<endl;
return 0;
}
catch(...)
{
- L<<Logger::Error<<"Carbon thread died"<<endl;
+ g_log<<Logger::Error<<"Carbon thread died"<<endl;
return 0;
}
totErased = pruneLockedCollectionsVector(d_maps, maxCached, cacheSize);
*d_statnumentries -= totErased;
- DLOG(L<<"Done with cache clean, cacheSize: "<<(*d_statnumentries)<<", totErased"<<totErased<<endl);
+ DLOG(g_log<<"Done with cache clean, cacheSize: "<<(*d_statnumentries)<<", totErased"<<totErased<<endl);
}
/* the logic:
time_t now = time(nullptr);
int timediff = max((int)(now - d_lastclean), 1);
- DLOG(L<<"cleaninterval: "<<d_cleaninterval<<", timediff: "<<timediff<<endl);
+ DLOG(g_log<<"cleaninterval: "<<d_cleaninterval<<", timediff: "<<timediff<<endl);
if (d_cleaninterval == s_maxcleaninterval && timediff < 30) {
d_cleanskipped = true;
d_nextclean += d_cleaninterval;
- DLOG(L<<"cleaning skipped, timediff: "<<timediff<<endl);
+ DLOG(g_log<<"cleaning skipped, timediff: "<<timediff<<endl);
return;
}
d_cleaninterval=std::max(d_cleaninterval, s_mincleaninterval);
d_cleaninterval=std::min(d_cleaninterval, s_maxcleaninterval);
- DLOG(L<<"new cleaninterval: "<<d_cleaninterval<<endl);
+ DLOG(g_log<<"new cleaninterval: "<<d_cleaninterval<<endl);
} else {
d_cleanskipped = false;
}
totErased = pruneLockedCollectionsVector(d_maps, maxCached, cacheSize);
*d_statnumentries -= totErased;
- DLOG(L<<"Done with cache clean, cacheSize: "<<*d_statnumentries<<", totErased"<<totErased<<endl);
+ DLOG(g_log<<"Done with cache clean, cacheSize: "<<*d_statnumentries<<", totErased"<<totErased<<endl);
}
/* the logic:
time_t now = time(nullptr);
int timediff = max((int)(now - d_lastclean), 1);
- DLOG(L<<"cleaninterval: "<<d_cleaninterval<<", timediff: "<<timediff<<endl);
+ DLOG(g_log<<"cleaninterval: "<<d_cleaninterval<<", timediff: "<<timediff<<endl);
if (d_cleaninterval == s_maxcleaninterval && timediff < 30) {
d_cleanskipped = true;
d_nextclean += d_cleaninterval;
- DLOG(L<<"cleaning skipped, timediff: "<<timediff<<endl);
+ DLOG(g_log<<"cleaning skipped, timediff: "<<timediff<<endl);
return;
}
d_cleaninterval=std::max(d_cleaninterval, s_mincleaninterval);
d_cleaninterval=std::min(d_cleaninterval, s_maxcleaninterval);
- DLOG(L<<"new cleaninterval: "<<d_cleaninterval<<endl);
+ DLOG(g_log<<"new cleaninterval: "<<d_cleaninterval<<endl);
} else {
d_cleanskipped = false;
}
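The DLOG(...) statements in the cache-maintenance hunks above only produce output in debug builds; the macro definition itself is not part of this change. A minimal, self-contained sketch of such a guard follows, assuming a build flag named DEBUGLOG (the flag name and the example program are illustrative only, not taken from the source):

#include <iostream>

// Hypothetical sketch of a debug-only logging wrapper; the real macro lives in a
// PowerDNS header and may be spelled differently. In non-debug builds the whole
// argument is compiled away to a no-op.
#ifdef DEBUGLOG
#define DLOG(x) x
#else
#define DLOG(x) ((void)0)
#endif

int main() {
  int totErased = 0;
  // Prints only when DEBUGLOG is defined; otherwise costs nothing at runtime.
  DLOG(std::cerr << "Done with cache clean, totErased: " << totErased << std::endl);
  return 0;
}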
try {
SOAData sd;
if(!getSOA(domain, sd))
- L<<Logger::Notice<<"No serial for '"<<domain<<"' found - zone is missing?"<<endl;
+ g_log<<Logger::Notice<<"No serial for '"<<domain<<"' found - zone is missing?"<<endl;
else
di.serial = sd.serial;
}
catch(PDNSException &ae){
- L<<Logger::Error<<"Error retrieving serial for '"<<domain<<"': "<<ae.reason<<endl;
+ g_log<<Logger::Error<<"Error retrieving serial for '"<<domain<<"': "<<ae.reason<<endl;
}
di.kind = DomainInfo::stringToKind(type);
bool GSQLBackend::list(const DNSName &target, int domain_id, bool include_disabled)
{
- DLOG(L<<"GSQLBackend constructing handle for list of domain id '"<<domain_id<<"'"<<endl);
+ DLOG(g_log<<"GSQLBackend constructing handle for list of domain id '"<<domain_id<<"'"<<endl);
try {
reconnectIfNeeded();
bool GSQLBackend::get(DNSResourceRecord &r)
{
- // L << "GSQLBackend get() was called for "<<qtype.getName() << " record: ";
+ // g_log << "GSQLBackend get() was called for "<<qtype.getName() << " record: ";
SSqlStatement::row_t row;
skiprow:
void GSQLBackend::getAllDomains(vector<DomainInfo> *domains, bool include_disabled)
{
- DLOG(L<<"GSQLBackend retrieving all domains."<<endl);
+ DLOG(g_log<<"GSQLBackend retrieving all domains."<<endl);
try {
reconnectIfNeeded();
reset();
}
catch (const SSqlException& e) {
- //DLOG(L<<"GSQLBackend unable to calculate SOA serial: " << e.txtReason()<<endl);
+ //DLOG(g_log<<"GSQLBackend unable to calculate SOA serial: " << e.txtReason()<<endl);
return false;
}
}
catch(std::exception& e)
{
- L<<Logger::Error<<"Had error retrieving queue sizes: "<<e.what()<<endl;
+ g_log<<Logger::Error<<"Had error retrieving queue sizes: "<<e.what()<<endl;
return 0;
}
catch(PDNSException& e)
{
- L<<Logger::Error<<"Had error retrieving queue sizes: "<<e.reason<<endl;
+ g_log<<Logger::Error<<"Had error retrieving queue sizes: "<<e.reason<<endl;
return 0;
}
remote = P->getRemote().toString() + "<-" + P->getRealRemote().toString();
else
remote = P->getRemote().toString();
- L << Logger::Notice<<"Remote "<< remote <<" wants '" << P->qdomain<<"|"<<P->qtype.getName() <<
+ g_log << Logger::Notice<<"Remote "<< remote <<" wants '" << P->qdomain<<"|"<<P->qtype.getName() <<
"', do = " <<P->d_dnssecOk <<", bufsize = "<< P->getMaxReplyLen();
if(P->d_ednsRawPacketSizeLimit > 0 && P->getMaxReplyLen() != (unsigned int)P->d_ednsRawPacketSizeLimit)
- L<<" ("<<P->d_ednsRawPacketSizeLimit<<")";
- L<<": ";
+ g_log<<" ("<<P->d_ednsRawPacketSizeLimit<<")";
+ g_log<<": ";
}
if((P->d.opcode != Opcode::Notify && P->d.opcode != Opcode::Update) && P->couldBeCached()) {
bool haveSomething=PC.get(P, &cached); // does the PacketCache recognize this question?
if (haveSomething) {
if(logDNSQueries)
- L<<"packetcache HIT"<<endl;
+ g_log<<"packetcache HIT"<<endl;
cached.setRemote(&P->d_remote); // inlined
cached.setSocket(P->getSocket()); // inlined
cached.d_anyLocal = P->d_anyLocal;
if(distributor->isOverloaded()) {
if(logDNSQueries)
- L<<"Dropped query, backends are overloaded"<<endl;
+ g_log<<"Dropped query, backends are overloaded"<<endl;
overloadDrops++;
continue;
}
if(logDNSQueries)
- L<<"packetcache MISS"<<endl;
+ g_log<<"packetcache MISS"<<endl;
try {
distributor->question(P, &sendout); // otherwise, give to the distributor
}
catch(PDNSException& pe)
{
- L<<Logger::Error<<"Fatal error in question thread: "<<pe.reason<<endl;
+ g_log<<Logger::Error<<"Fatal error in question thread: "<<pe.reason<<endl;
_exit(1);
}
char *ns;
ns = getenv("NOTIFY_SOCKET");
if (ns != nullptr) {
- L<<Logger::Error<<"Unable to chroot when running from systemd. Please disable chroot= or set the 'Type' for this service to 'simple'"<<endl;
+ g_log<<Logger::Error<<"Unable to chroot when running from systemd. Please disable chroot= or set the 'Type' for this service to 'simple'"<<endl;
exit(1);
}
#endif
gethostbyname("a.root-servers.net"); // this forces all lookup libraries to be loaded
Utility::dropGroupPrivs(newuid, newgid);
if(chroot(::arg()["chroot"].c_str())<0 || chdir("/")<0) {
- L<<Logger::Error<<"Unable to chroot to '"+::arg()["chroot"]+"': "<<strerror(errno)<<", exiting"<<endl;
+ g_log<<Logger::Error<<"Unable to chroot to '"+::arg()["chroot"]+"': "<<strerror(errno)<<", exiting"<<endl;
exit(1);
}
else
- L<<Logger::Error<<"Chrooted to '"<<::arg()["chroot"]<<"'"<<endl;
+ g_log<<Logger::Error<<"Chrooted to '"<<::arg()["chroot"]<<"'"<<endl;
} else {
Utility::dropGroupPrivs(newuid, newgid);
}
algo = DNSSECKeeper::shorthand2algorithm(::arg()["default-"+algotype+"-algorithm"]);
size = ::arg().asNum("default-"+algotype+"-size");
if (algo == -1)
- L<<Logger::Warning<<"Warning: default-"<<algotype<<"-algorithm set to unknown algorithm: "<<::arg()["default-"+algotype+"-algorithm"]<<endl;
+ g_log<<Logger::Warning<<"Warning: default-"<<algotype<<"-algorithm set to unknown algorithm: "<<::arg()["default-"+algotype+"-algorithm"]<<endl;
else if (algo <= 10 && size == 0)
- L<<Logger::Warning<<"Warning: default-"<<algotype<<"-algorithm is set to an algorithm ("<<::arg()["default-"+algotype+"-algorithm"]<<") that requires a non-zero default-"<<algotype<<"-size!"<<endl;
+ g_log<<Logger::Warning<<"Warning: default-"<<algotype<<"-algorithm is set to an algorithm ("<<::arg()["default-"+algotype+"-algorithm"]<<") that requires a non-zero default-"<<algotype<<"-size!"<<endl;
}
// NOW SAFE TO CREATE THREADS!
catch(...){}
}
- L<<Logger::Error<<"Mainthread exiting - should never happen"<<endl;
+ g_log<<Logger::Error<<"Mainthread exiting - should never happen"<<endl;
}
listset.insert(caIp.toStringWithPort());
}
catch(PDNSException &e) {
- L<<Logger::Error<<"Unparseable IP in "<<listname<<". Error: "<<e.reason<<endl;
+ g_log<<Logger::Error<<"Unparseable IP in "<<listname<<". Error: "<<e.reason<<endl;
_exit(1);
}
}
PacketHandler::s_allowNotifyFrom.toMasks(::arg()["allow-notify-from"] );
}
catch(PDNSException &e) {
- L<<Logger::Error<<"Unparseable IP in allow-notify-from. Error: "<<e.reason<<endl;
+ g_log<<Logger::Error<<"Unparseable IP in allow-notify-from. Error: "<<e.reason<<endl;
_exit(1);
}
d_onlyNotify.toMasks(::arg()["only-notify"]);
}
catch(PDNSException &e) {
- L<<Logger::Error<<"Unparseable IP in only-notify. Error: "<<e.reason<<endl;
+ g_log<<Logger::Error<<"Unparseable IP in only-notify. Error: "<<e.reason<<endl;
_exit(1);
}
{
try {
signal(SIGPIPE,SIG_IGN);
- L<<Logger::Error<<"Master/slave communicator launching"<<endl;
+ g_log<<Logger::Error<<"Master/slave communicator launching"<<endl;
PacketHandler P;
d_tickinterval=::arg().asNum("slave-cycle-interval");
makeNotifySockets();
}
}
catch(PDNSException &ae) {
- L<<Logger::Error<<"Exiting because communicator thread died with error: "<<ae.reason<<endl;
+ g_log<<Logger::Error<<"Exiting because communicator thread died with error: "<<ae.reason<<endl;
Utility::sleep(1);
_exit(1);
}
catch(std::exception &e) {
- L<<Logger::Error<<"Exiting because communicator thread died with STL error: "<<e.what()<<endl;
+ g_log<<Logger::Error<<"Exiting because communicator thread died with STL error: "<<e.what()<<endl;
_exit(1);
}
catch( ... )
{
- L << Logger::Error << "Exiting because communicator caught unknown exception." << endl;
+ g_log << Logger::Error << "Exiting because communicator caught unknown exception." << endl;
_exit(1);
}
}
*ns3p = NSEC3PARAMRecordContent(value);
if (ns3p->d_iterations > maxNSEC3Iterations) {
ns3p->d_iterations = maxNSEC3Iterations;
- L<<Logger::Error<<"Number of NSEC3 iterations for zone '"<<zname<<"' is above 'max-nsec3-iterations'. Value adjusted to: "<<maxNSEC3Iterations<<endl;
+ g_log<<Logger::Error<<"Number of NSEC3 iterations for zone '"<<zname<<"' is above 'max-nsec3-iterations'. Value adjusted to: "<<maxNSEC3Iterations<<endl;
}
if (ns3p->d_algorithm != 1) {
- L<<Logger::Error<<"Invalid hash algorithm for NSEC3: '"<<std::to_string(ns3p->d_algorithm)<<"', setting to 1 for zone '"<<zname<<"'."<<endl;
+ g_log<<Logger::Error<<"Invalid hash algorithm for NSEC3: '"<<std::to_string(ns3p->d_algorithm)<<"', setting to 1 for zone '"<<zname<<"'."<<endl;
ns3p->d_algorithm = 1;
}
}
dpk.d_flags = kd.flags;
dpk.d_algorithm = dkrc.d_algorithm;
if(dpk.d_algorithm == DNSSECKeeper::RSASHA1 && getNSEC3PARAM(zone)) {
- L<<Logger::Warning<<"Zone '"<<zone<<"' has NSEC3 semantics, but the "<< (kd.active ? "" : "in" ) <<"active key with id "<<kd.id<<" has 'Algorithm: 5'. This should be corrected to 'Algorithm: 7' in the database (or NSEC3 should be disabled)."<<endl;
+ g_log<<Logger::Warning<<"Zone '"<<zone<<"' has NSEC3 semantics, but the "<< (kd.active ? "" : "in" ) <<"active key with id "<<kd.id<<" has 'Algorithm: 5'. This should be corrected to 'Algorithm: 7' in the database (or NSEC3 should be disabled)."<<endl;
dpk.d_algorithm = DNSSECKeeper::RSASHA1NSEC3SHA1;
}
// cerr<<"Doing DB lookup for precomputed RRSIGs for '"<<(wildcardname.empty() ? qname : wildcardname)<<"'"<<endl;
SOAData sd;
if(!db.getSOAUncached(signer, sd)) {
- DLOG(L<<"Could not get SOA for domain"<<endl);
+ DLOG(g_log<<"Could not get SOA for domain"<<endl);
return false;
}
db.lookup(QType(QType::RRSIG), wildcardname.countLabels() ? wildcardname : qname, NULL, sd.domain_id);
{
if(!(maxent))
{
- L<<Logger::Warning<<"Zone '"<<zone<<"' has too many empty non terminals."<<endl;
+ g_log<<Logger::Warning<<"Zone '"<<zone<<"' has too many empty non terminals."<<endl;
insnonterm.clear();
delnonterm.clear();
doent=false;
template<class Answer, class Question, class Backend>SingleThreadDistributor<Answer,Question,Backend>::SingleThreadDistributor()
{
- L<<Logger::Error<<"Only asked for 1 backend thread - operating unthreaded"<<endl;
+ g_log<<Logger::Error<<"Only asked for 1 backend thread - operating unthreaded"<<endl;
try {
b=new Backend;
}
catch(const PDNSException &AE) {
- L<<Logger::Error<<"Distributor caught fatal exception: "<<AE.reason<<endl;
+ g_log<<Logger::Error<<"Distributor caught fatal exception: "<<AE.reason<<endl;
_exit(1);
}
catch(...) {
- L<<Logger::Error<<"Caught an unknown exception when creating backend, probably"<<endl;
+ g_log<<Logger::Error<<"Caught an unknown exception when creating backend, probably"<<endl;
_exit(1);
}
}
}
if (n<1) {
- L<<Logger::Error<<"Asked for fewer than 1 threads, nothing to do"<<endl;
+ g_log<<Logger::Error<<"Asked for fewer than 1 threads, nothing to do"<<endl;
_exit(1);
}
- L<<Logger::Warning<<"About to create "<<n<<" backend threads for UDP"<<endl;
+ g_log<<Logger::Warning<<"About to create "<<n<<" backend threads for UDP"<<endl;
for(int i=0;i<n;i++) {
pthread_create(&tid,0,&makeThread,static_cast<void *>(this));
Utility::usleep(50000); // we've overloaded mysql in the past :-)
}
- L<<Logger::Warning<<"Done launching threads, ready to distribute questions"<<endl;
+ g_log<<Logger::Warning<<"Done launching threads, ready to distribute questions"<<endl;
}
delete b;
b=NULL;
if (!allowRetry) {
- L<<Logger::Error<<"Backend error: "<<e.reason<<endl;
+ g_log<<Logger::Error<<"Backend error: "<<e.reason<<endl;
a=QD->Q->replyPacket();
a->setRcode(RCode::ServFail);
delete QD->Q;
} else {
- L<<Logger::Notice<<"Backend error (retry once): "<<e.reason<<endl;
+ g_log<<Logger::Notice<<"Backend error (retry once): "<<e.reason<<endl;
goto retry;
}
}
delete b;
b=NULL;
if (!allowRetry) {
- L<<Logger::Error<<"Caught unknown exception in Distributor thread "<<(long)pthread_self()<<endl;
+ g_log<<Logger::Error<<"Caught unknown exception in Distributor thread "<<(long)pthread_self()<<endl;
a=QD->Q->replyPacket();
a->setRcode(RCode::ServFail);
delete QD->Q;
} else {
- L<<Logger::Warning<<"Caught unknown exception in Distributor thread "<<(long)pthread_self()<<" (retry once)"<<endl;
+ g_log<<Logger::Warning<<"Caught unknown exception in Distributor thread "<<(long)pthread_self()<<" (retry once)"<<endl;
goto retry;
}
}
delete b;
}
catch(const PDNSException &AE) {
- L<<Logger::Error<<"Distributor caught fatal exception: "<<AE.reason<<endl;
+ g_log<<Logger::Error<<"Distributor caught fatal exception: "<<AE.reason<<endl;
_exit(1);
}
catch(...) {
- L<<Logger::Error<<"Caught an unknown exception when creating backend, probably"<<endl;
+ g_log<<Logger::Error<<"Caught an unknown exception when creating backend, probably"<<endl;
_exit(1);
}
return 0;
delete b;
b=NULL;
if (!allowRetry) {
- L<<Logger::Error<<"Backend error: "<<e.reason<<endl;
+ g_log<<Logger::Error<<"Backend error: "<<e.reason<<endl;
a=q->replyPacket();
a->setRcode(RCode::ServFail);
S.inc("servfail-packets");
S.ringAccount("servfail-queries",q->qdomain.toLogString());
} else {
- L<<Logger::Notice<<"Backend error (retry once): "<<e.reason<<endl;
+ g_log<<Logger::Notice<<"Backend error (retry once): "<<e.reason<<endl;
goto retry;
}
}
delete b;
b=NULL;
if (!allowRetry) {
- L<<Logger::Error<<"Caught unknown exception in Distributor thread "<<(unsigned long)pthread_self()<<endl;
+ g_log<<Logger::Error<<"Caught unknown exception in Distributor thread "<<(unsigned long)pthread_self()<<endl;
a=q->replyPacket();
a->setRcode(RCode::ServFail);
S.inc("servfail-packets");
S.ringAccount("servfail-queries",q->qdomain.toLogString());
} else {
- L<<Logger::Warning<<"Caught unknown exception in Distributor thread "<<(unsigned long)pthread_self()<<" (retry once)"<<endl;
+ g_log<<Logger::Warning<<"Caught unknown exception in Distributor thread "<<(unsigned long)pthread_self()<<" (retry once)"<<endl;
goto retry;
}
}
if(d_queued > d_maxQueueLength) {
- L<<Logger::Error<< d_queued <<" questions waiting for database/backend attention. Limit is "<<::arg().asNum("max-queue-length")<<", respawning"<<endl;
+ g_log<<Logger::Error<< d_queued <<" questions waiting for database/backend attention. Limit is "<<::arg().asNum("max-queue-length")<<", respawning"<<endl;
// this will leak the entire contents of all pipes, nothing will be freed. Respawn when this happens!
throw DistributorFatal();
}
#define FLAGS_CD_OFFSET (12)
#endif
-#define L theL()
extern time_t s_starttime;
uint32_t hashQuestion(const char* packet, uint16_t len, uint32_t init);
// TODO: Implement this?
DIR *dir=opendir(arg()["module-dir"].c_str());
if(!dir) {
- L<<Logger::Error<<"Unable to open module directory '"<<arg()["module-dir"]<<"'"<<endl;
+ g_log<<Logger::Error<<"Unable to open module directory '"<<arg()["module-dir"]<<"'"<<endl;
return;
}
struct dirent *entry;
res=UeberBackend::loadmodule(arg()["module-dir"]+"/"+module);
if(res==false) {
- L<<Logger::Error<<"DNSBackend unable to load module in "<<module<<endl;
+ g_log<<Logger::Error<<"DNSBackend unable to load module in "<<module<<endl;
exit(1);
}
}
}
}
catch(PDNSException &ae) {
- L<<Logger::Error<<"Caught an exception instantiating a backend: "<<ae.reason<<endl;
- L<<Logger::Error<<"Cleaning up"<<endl;
+ g_log<<Logger::Error<<"Caught an exception instantiating a backend: "<<ae.reason<<endl;
+ g_log<<Logger::Error<<"Cleaning up"<<endl;
for(vector<DNSBackend *>::const_iterator i=ret.begin();i!=ret.end();++i)
delete *i;
throw;
} catch(...) {
// and cleanup
- L<<Logger::Error<<"Caught an exception instantiating a backend, cleaning up"<<endl;
+ g_log<<Logger::Error<<"Caught an exception instantiating a backend, cleaning up"<<endl;
for(vector<DNSBackend *>::const_iterator i=ret.begin();i!=ret.end();++i)
delete *i;
throw;
}
if(!unmodifiedSerial && !sd.serial) { // magic time!
- DLOG(L<<Logger::Warning<<"Doing SOA serial number autocalculation for "<<rr.qname<<endl);
+ DLOG(g_log<<Logger::Warning<<"Doing SOA serial number autocalculation for "<<rr.qname<<endl);
uint32_t serial = 0;
if (calculateSOASerial(domain, sd, serial)) {
sd.serial = serial;
- //DLOG(L<<"autocalculated soa serialnumber for "<<rr.qname<<" is "<<newest<<endl);
+ //DLOG(g_log<<"autocalculated soa serialnumber for "<<rr.qname<<" is "<<newest<<endl);
} else {
- DLOG(L<<"soa serialnumber calculation failed for "<<rr.qname<<endl);
+ DLOG(g_log<<"soa serialnumber calculation failed for "<<rr.qname<<endl);
}
}
uint32_t newest=0;
if(!(this->list(domain, sd.domain_id))) {
- DLOG(L<<Logger::Warning<<"Backend error trying to determine magic serial number of zone '"<<domain<<"'"<<endl);
+ DLOG(g_log<<Logger::Warning<<"Backend error trying to determine magic serial number of zone '"<<domain<<"'"<<endl);
return false;
}
#include "sstuff.hh"
#include "namespaces.hh"
-#undef L
#include "dnsdist.hh"
GlobalStateHolder<vector<CarbonConfig> > g_carbon;
bindAny(cs->local.sin4.sin_family, cs->udpFD);
// if (!setSocketTimestamps(cs->udpFD))
- // L<<Logger::Warning<<"Unable to enable timestamp reporting for socket"<<endl;
+ // g_log<<Logger::Warning<<"Unable to enable timestamp reporting for socket"<<endl;
if(IsAnyAddress(cs->local)) {
DNSPacket::DNSPacket(const DNSPacket &orig)
{
- DLOG(L<<"DNSPacket copy constructor called!"<<endl);
+ DLOG(g_log<<"DNSPacket copy constructor called!"<<endl);
d_socket=orig.d_socket;
d_remote=orig.d_remote;
d_dt=orig.d_dt;
}
}
catch(std::exception& e) {
- L<<Logger::Warning<<"Exception: "<<e.what()<<endl;
+ g_log<<Logger::Warning<<"Exception: "<<e.what()<<endl;
throw;
}
}
{
d_rawpacket.assign(mesg,length);
if(length < 12) {
- L << Logger::Debug << "Ignoring packet: too short ("<<length<<" < 12) from "
+ g_log << Logger::Debug << "Ignoring packet: too short ("<<length<<" < 12) from "
<< d_remote.toStringWithPort()<< endl;
return -1;
}
// cast can fail, f.e. if d_content is an UnknownRecordContent.
shared_ptr<TSIGRecordContent> content = std::dynamic_pointer_cast<TSIGRecordContent>(i->first.d_content);
if (!content) {
- L<<Logger::Error<<"TSIG record has no or invalid content (invalid packet)"<<endl;
+ g_log<<Logger::Error<<"TSIG record has no or invalid content (invalid packet)"<<endl;
return false;
}
*trc = *content;
for(MOADNSParser::answers_t::const_iterator i=mdp.d_answers.begin(); i!=mdp.d_answers.end(); ++i) {
if (gotit) {
- L<<Logger::Error<<"More than one TKEY record found in query"<<endl;
+ g_log<<Logger::Error<<"More than one TKEY record found in query"<<endl;
return false;
}
// cast can fail, f.e. if d_content is an UnknownRecordContent.
shared_ptr<TKEYRecordContent> content = std::dynamic_pointer_cast<TKEYRecordContent>(i->first.d_content);
if (!content) {
- L<<Logger::Error<<"TKEY record has no or invalid content (invalid packet)"<<endl;
+ g_log<<Logger::Error<<"TKEY record has no or invalid content (invalid packet)"<<endl;
return false;
}
*tr = *content;
d_rawpacket.assign(mesg,length);
d_wrapped=true;
if(length < 12) {
- L << Logger::Warning << "Ignoring packet: too short from "
+ g_log << Logger::Warning << "Ignoring packet: too short from "
<< getRemote() << endl;
return -1;
}
if(!ntohs(d.qdcount)) {
if(!d_tcp) {
- L << Logger::Warning << "No question section in packet from " << getRemote() <<", error="<<RCode::to_s(d.rcode)<<endl;
+ g_log << Logger::Warning << "No question section in packet from " << getRemote() <<", error="<<RCode::to_s(d.rcode)<<endl;
return -1;
}
}
string secret64;
if (tt.algo != DNSName("gss-tsig")) {
if(!B->getTSIGKey(*keyname, &tt.algo, &secret64)) {
- L<<Logger::Error<<"Packet for domain '"<<this->qdomain<<"' denied: can't find TSIG key with name '"<<*keyname<<"' and algorithm '"<<tt.algo<<"'"<<endl;
+ g_log<<Logger::Error<<"Packet for domain '"<<this->qdomain<<"' denied: can't find TSIG key with name '"<<*keyname<<"' and algorithm '"<<tt.algo<<"'"<<endl;
return false;
}
B64Decode(secret64, *secret);
result = validateTSIG(d_rawpacket, tsigPos, tt, *trc, "", trc->d_mac, false);
}
catch(const std::runtime_error& err) {
- L<<Logger::Error<<"Packet for '"<<this->qdomain<<"' denied: "<<err.what()<<endl;
+ g_log<<Logger::Error<<"Packet for '"<<this->qdomain<<"' denied: "<<err.what()<<endl;
return false;
}
}
d_xor=dns_random(0xffff);
- L<<Logger::Error<<"DNS Proxy launched, local port "<<ntohs(local.sin4.sin_port)<<", remote "<<d_remote.toStringWithPort()<<endl;
+ g_log<<Logger::Error<<"DNS Proxy launched, local port "<<ntohs(local.sin4.sin_port)<<", remote "<<d_remote.toStringWithPort()<<endl;
}
void DNSProxy::go()
ret2 = stubDoResolve(target, QType::AAAA, ips);
if(ret1 != RCode::NoError || ret2 != RCode::NoError) {
- L<<Logger::Error<<"Error resolving for ALIAS "<<target<<", returning SERVFAIL"<<endl;
+ g_log<<Logger::Error<<"Error resolving for ALIAS "<<target<<", returning SERVFAIL"<<endl;
}
for (auto &ip : ips)
pw.getHeader()->id=id ^ d_xor;
if(send(d_sock,&packet[0], packet.size() , 0)<0) { // zoom
- L<<Logger::Error<<"Unable to send a packet to our recursing backend: "<<stringerror()<<endl;
+ g_log<<Logger::Error<<"Unable to send a packet to our recursing backend: "<<stringerror()<<endl;
}
return true;
}
else if(i->second.created<time(0)-60) {
if(i->second.created) {
- L<<Logger::Warning<<"Recursive query for remote "<<
+ g_log<<Logger::Warning<<"Recursive query for remote "<<
i->second.remote.toStringWithPort()<<" with internal id "<<n<<
" was not answered by backend within timeout, reusing id"<<endl;
delete i->second.complete;
len=recvfrom(d_sock, buffer, sizeof(buffer),0, (struct sockaddr*) &fromaddr, &fromaddrSize); // answer from our backend
if(len<(ssize_t)sizeof(dnsheader)) {
if(len<0)
- L<<Logger::Error<<"Error receiving packet from recursor backend: "<<stringerror()<<endl;
+ g_log<<Logger::Error<<"Error receiving packet from recursor backend: "<<stringerror()<<endl;
else if(len==0)
- L<<Logger::Error<<"Error receiving packet from recursor backend, EOF"<<endl;
+ g_log<<Logger::Error<<"Error receiving packet from recursor backend, EOF"<<endl;
else
- L<<Logger::Error<<"Short packet from recursor backend, "<<len<<" bytes"<<endl;
+ g_log<<Logger::Error<<"Short packet from recursor backend, "<<len<<" bytes"<<endl;
continue;
}
if (fromaddr != d_remote) {
- L<<Logger::Error<<"Got answer from unexpected host "<<fromaddr.toStringWithPort()<<" instead of our recursor backend "<<d_remote.toStringWithPort()<<endl;
+ g_log<<Logger::Error<<"Got answer from unexpected host "<<fromaddr.toStringWithPort()<<" instead of our recursor backend "<<d_remote.toStringWithPort()<<endl;
continue;
}
(*d_resanswers)++;
#endif
map_t::iterator i=d_conntrack.find(d.id^d_xor);
if(i==d_conntrack.end()) {
- L<<Logger::Error<<"Discarding untracked packet from recursor backend with id "<<(d.id^d_xor)<<
+ g_log<<Logger::Error<<"Discarding untracked packet from recursor backend with id "<<(d.id^d_xor)<<
". Conntrack table size="<<d_conntrack.size()<<endl;
continue;
}
else if(i->second.created==0) {
- L<<Logger::Error<<"Received packet from recursor backend with id "<<(d.id^d_xor)<<" which is a duplicate"<<endl;
+ g_log<<Logger::Error<<"Received packet from recursor backend with id "<<(d.id^d_xor)<<" which is a duplicate"<<endl;
continue;
}
q.parse(buffer,(size_t)len);
if(p.qtype.getCode() != i->second.qtype || p.qdomain != i->second.qname) {
- L<<Logger::Error<<"Discarding packet from recursor backend with id "<<(d.id^d_xor)<<
+ g_log<<Logger::Error<<"Discarding packet from recursor backend with id "<<(d.id^d_xor)<<
", qname or qtype mismatch ("<<p.qtype.getCode()<<" v " <<i->second.qtype<<", "<<p.qdomain<<" v "<<i->second.qname<<")"<<endl;
continue;
}
addCMsgSrcAddr(&msgh, cbuf, i->second.anyLocal.get_ptr(), 0);
}
if(sendmsg(i->second.outsock, &msgh, 0) < 0)
- L<<Logger::Warning<<"dnsproxy.cc: Error sending reply with sendmsg (socket="<<i->second.outsock<<"): "<<strerror(errno)<<endl;
+ g_log<<Logger::Warning<<"dnsproxy.cc: Error sending reply with sendmsg (socket="<<i->second.outsock<<"): "<<strerror(errno)<<endl;
i->second.created=0;
}
}
}
catch(PDNSException &ae) {
- L<<Logger::Error<<"Fatal error in DNS proxy: "<<ae.reason<<endl;
+ g_log<<Logger::Error<<"Fatal error in DNS proxy: "<<ae.reason<<endl;
}
catch(std::exception &e) {
- L<<Logger::Error<<"Communicator thread died because of STL error: "<<e.what()<<endl;
+ g_log<<Logger::Error<<"Communicator thread died because of STL error: "<<e.what()<<endl;
}
catch( ... )
{
- L << Logger::Error << "Caught unknown exception." << endl;
+ g_log << Logger::Error << "Caught unknown exception." << endl;
}
- L<<Logger::Error<<"Exiting because DNS proxy failed"<<endl;
+ g_log<<Logger::Error<<"Exiting because DNS proxy failed"<<endl;
_exit(1);
}
#include "dnsrecords.hh"
#include "ednssubnet.hh"
#include "ednsoptions.hh"
-// this is needed because boost multi_index also uses 'L', as do we (which is sad enough)
-#undef L
#include <set>
#include <deque>
#include "sstuff.hh"
#include "anadns.hh"
-// this is needed because boost multi_index also uses 'L', as do we (which is sad enough)
-#undef L
#include <set>
#include <deque>
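
The comment being removed above records why these includes had to be preceded by #undef L in the first place: with the project-wide "#define L theL()" in effect, the preprocessor rewrites any identifier spelled L, and per that comment Boost.MultiIndex uses one internally. A hypothetical, compilable illustration of the clash (made-up identifiers, not code from the tree):

    #define L theL()    // the old project-wide logging macro
    // template <class L> struct box { L value; };
    //   ^ with the macro active this would expand to "template <class theL()>" and fail to compile
    #undef L            // hence the #undef that used to precede third-party includes
    template <class L> struct box { L value; };   // fine once L is no longer a macro
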
WriteLock l(&g_signatures_lock);
if(g_cacheweekno < weekno || g_signatures.size() >= (uint) maxcachesize) { // blunt but effective (C) Habbie, mind04
- L<<Logger::Warning<<"Cleared signature cache."<<endl;
+ g_log<<Logger::Warning<<"Cleared signature cache."<<endl;
g_signatures.clear();
g_cacheweekno = weekno;
}
if(parts[0]=="QUIT") {
s_pleasequit=true;
ret="Scheduling exit";
- L<<Logger::Error<<"Scheduling exit on remote request"<<endl;
+ g_log<<Logger::Error<<"Scheduling exit on remote request"<<endl;
}
return ret;
}
return "Unable to convert '"+parts[2]+"' to an IP address";
}
- L<<Logger::Warning<<"Notification request to host "<<parts[2]<<" for domain '"<<domain<<"' received from operator"<<endl;
+ g_log<<Logger::Warning<<"Notification request to host "<<parts[2]<<" for domain '"<<domain<<"' received from operator"<<endl;
Communicator.notify(domain, parts[2]);
return "Added to queue";
}
return "syntax: notify domain";
if(!::arg().mustDo("master") && !::arg().mustDo("slave-renotify"))
return "PowerDNS not configured as master or slave with re-notifications";
- L<<Logger::Warning<<"Notification request for domain '"<<parts[1]<<"' received from operator"<<endl;
+ g_log<<Logger::Warning<<"Notification request for domain '"<<parts[1]<<"' received from operator"<<endl;
if (parts[1] == "*") {
vector<DomainInfo> domains;
{
UeberBackend B;
try {
- L<<Logger::Error<<"Rediscovery was requested"<<endl;
+ g_log<<Logger::Error<<"Rediscovery was requested"<<endl;
string status="Ok";
B.rediscover(&status);
return status;
{
UeberBackend B;
B.reload();
- L<<Logger::Error<<"Reload was requested"<<endl;
+ g_log<<Logger::Error<<"Reload was requested"<<endl;
return "Ok";
}
string DLListZones(const vector<string>&parts, Utility::pid_t ppid)
{
UeberBackend B;
- L<<Logger::Notice<<"Received request to list zones."<<endl;
+ g_log<<Logger::Notice<<"Received request to list zones."<<endl;
vector<DomainInfo> domains;
B.getAllDomains(&domains);
ostringstream ret;
if(d_s < 0) {
if (family == AF_UNIX)
- L<<Logger::Error<<"Unable to create control socket at '"<<((struct sockaddr_un*)local)->sun_path<<"', reason: "<<strerror(errno)<<endl;
+ g_log<<Logger::Error<<"Unable to create control socket at '"<<((struct sockaddr_un*)local)->sun_path<<"', reason: "<<strerror(errno)<<endl;
else
- L<<Logger::Error<<"Unable to create control socket on '"<<((ComboAddress *)local)->toStringWithPort()<<"', reason: "<<strerror(errno)<<endl;
+ g_log<<Logger::Error<<"Unable to create control socket on '"<<((ComboAddress *)local)->toStringWithPort()<<"', reason: "<<strerror(errno)<<endl;
exit(1);
}
if(bind(d_s, local, len) < 0) {
if (family == AF_UNIX)
- L<<Logger::Critical<<"Unable to bind to control socket at '"<<((struct sockaddr_un*)local)->sun_path<<"', reason: "<<strerror(errno)<<endl;
+ g_log<<Logger::Critical<<"Unable to bind to control socket at '"<<((struct sockaddr_un*)local)->sun_path<<"', reason: "<<strerror(errno)<<endl;
else
- L<<Logger::Critical<<"Unable to bind to control socket on '"<<((ComboAddress *)local)->toStringWithPort()<<"', reason: "<<strerror(errno)<<endl;
+ g_log<<Logger::Critical<<"Unable to bind to control socket on '"<<((ComboAddress *)local)->toStringWithPort()<<"', reason: "<<strerror(errno)<<endl;
exit(1);
}
}
}
if (makeUNsockaddr(fname, &addr)) {
- L<<Logger::Critical<<"Unable to open controlsocket, path '"<<fname<<"' is not a valid UNIX socket path."<<endl;
+ g_log<<Logger::Critical<<"Unable to open controlsocket, path '"<<fname<<"' is not a valid UNIX socket path."<<endl;
exit(1);
}
void DynListener::listenOnUnixDomain(const string& fname)
{
if(testLive(fname)) {
- L<<Logger::Critical<<"Previous controlsocket '"<<fname<<"' is in use"<<endl;
+ g_log<<Logger::Critical<<"Previous controlsocket '"<<fname<<"' is in use"<<endl;
exit(1);
}
int err=unlink(fname.c_str());
if(err < 0 && errno!=ENOENT) {
- L<<Logger::Critical<<"Unable to remove (previous) controlsocket at '"<<fname<<"': "<<strerror(errno)<<endl;
+ g_log<<Logger::Critical<<"Unable to remove (previous) controlsocket at '"<<fname<<"': "<<strerror(errno)<<endl;
exit(1);
}
struct sockaddr_un local;
if (makeUNsockaddr(fname, &local)) {
- L<<Logger::Critical<<"Unable to bind to controlsocket, path '"<<fname<<"' is not a valid UNIX socket path."<<endl;
+ g_log<<Logger::Critical<<"Unable to bind to controlsocket, path '"<<fname<<"' is not a valid UNIX socket path."<<endl;
exit(1);
}
d_socketname=fname;
if(!arg()["setgid"].empty()) {
if(chmod(fname.c_str(),0660)<0)
- L<<Logger::Error<<"Unable to change group access mode of controlsocket at '"<<fname<<"', reason: "<<strerror(errno)<<endl;
+ g_log<<Logger::Error<<"Unable to change group access mode of controlsocket at '"<<fname<<"', reason: "<<strerror(errno)<<endl;
if(chown(fname.c_str(),static_cast<uid_t>(-1),Utility::makeGidNumeric(arg()["setgid"]))<0)
- L<<Logger::Error<<"Unable to change group ownership of controlsocket at '"<<fname<<"', reason: "<<strerror(errno)<<endl;
+ g_log<<Logger::Error<<"Unable to change group ownership of controlsocket at '"<<fname<<"', reason: "<<strerror(errno)<<endl;
}
listen(d_s, 10);
- L<<Logger::Warning<<"Listening on controlsocket in '"<<fname<<"'"<<endl;
+ g_log<<Logger::Warning<<"Listening on controlsocket in '"<<fname<<"'"<<endl;
d_nonlocal=true;
}
listen(d_s, 10);
d_socketaddress=local;
- L<<Logger::Warning<<"Listening on controlsocket on '"<<local.toStringWithPort()<<"'"<<endl;
+ g_log<<Logger::Warning<<"Listening on controlsocket on '"<<local.toStringWithPort()<<"'"<<endl;
d_nonlocal=true;
if(!::arg()["tcp-control-range"].empty()) {
d_tcprange.toMasks(::arg()["tcp-control-range"]);
- L<<Logger::Warning<<"Only allowing TCP control from: "<<d_tcprange.toString()<<endl;
+ g_log<<Logger::Warning<<"Only allowing TCP control from: "<<d_tcprange.toString()<<endl;
}
}
cleanSlashes(socketname);
if(!mkdir(socketname.c_str(),0700)) // make /var directory, if needed
- L<<Logger::Warning<<"Created local state directory '"<<socketname<<"'"<<endl;
+ g_log<<Logger::Warning<<"Created local state directory '"<<socketname<<"'"<<endl;
else if(errno!=EEXIST) {
- L<<Logger::Critical<<"Unable to create socket directory ("<<socketname<<") and it does not exist yet"<<endl;
+ g_log<<Logger::Critical<<"Unable to create socket directory ("<<socketname<<") and it does not exist yet"<<endl;
exit(1);
}
{
DynListener *us=static_cast<DynListener *>(p);
us->theListener();
- L<<Logger::Error<<"Control listener aborted, please file a bug!"<<endl;
+ g_log<<Logger::Error<<"Control listener aborted, please file a bug!"<<endl;
return 0;
}
d_client=accept(d_s,(sockaddr*)&remote,&remlen);
if(d_client<0) {
if(errno!=EINTR)
- L<<Logger::Error<<"Unable to accept controlsocket connection ("<<d_s<<"): "<<strerror(errno)<<endl;
+ g_log<<Logger::Error<<"Unable to accept controlsocket connection ("<<d_s<<"): "<<strerror(errno)<<endl;
continue;
}
if(d_tcp && !d_tcprange.match(&remote)) { // checks if the remote is within the permitted range.
- L<<Logger::Error<<"Access denied to remote "<<remote.toString()<<" because not allowed"<<endl;
+ g_log<<Logger::Error<<"Access denied to remote "<<remote.toString()<<" because not allowed"<<endl;
writen2(d_client, "Access denied to "+remote.toString()+"\n");
close(d_client);
continue;
std::shared_ptr<FILE> fp=std::shared_ptr<FILE>(fdopen(dup(d_client), "r"), fclose);
if(d_tcp) {
if(!fgets(&mesg[0], mesg.size(), fp.get())) {
- L<<Logger::Error<<"Unable to receive password from controlsocket ("<<d_client<<"): "<<strerror(errno)<<endl;
+ g_log<<Logger::Error<<"Unable to receive password from controlsocket ("<<d_client<<"): "<<strerror(errno)<<endl;
close(d_client);
continue;
}
string password(&mesg[0]);
boost::trim(password);
if(password.empty() || password!=arg()["tcp-control-secret"]) {
- L<<Logger::Error<<"Wrong password on TCP control socket"<<endl;
+ g_log<<Logger::Error<<"Wrong password on TCP control socket"<<endl;
writen2(d_client, "Wrong password");
close(d_client);
errno=0;
if(!fgets(&mesg[0], mesg.size(), fp.get())) {
if(errno)
- L<<Logger::Error<<"Unable to receive line from controlsocket ("<<d_client<<"): "<<strerror(errno)<<endl;
+ g_log<<Logger::Error<<"Unable to receive line from controlsocket ("<<d_client<<"): "<<strerror(errno)<<endl;
close(d_client);
continue;
}
if(strlen(&mesg[0]) == mesg.size()) {
- L<<Logger::Error<<"Line on controlsocket ("<<d_client<<") was too long"<<endl;
+ g_log<<Logger::Error<<"Line on controlsocket ("<<d_client<<") was too long"<<endl;
close(d_client);
continue;
}
ret=send(d_client, l.c_str()+sent, l.length()-sent, 0);
if(ret<0 || !ret) {
- L<<Logger::Error<<"Error sending data to pdns_control: "<<stringerror()<<endl;
+ g_log<<Logger::Error<<"Error sending data to pdns_control: "<<stringerror()<<endl;
break;
}
sent+=ret;
lines.append(1, '\0');
lines.append(1, '\n');
if((unsigned int)write(1, lines.c_str(), lines.length()) != lines.length())
- L<<Logger::Error<<"Error sending data to console: "<<stringerror()<<endl;
+ g_log<<Logger::Error<<"Error sending data to console: "<<stringerror()<<endl;
}
}
sendlines("Unknown command: '"+parts[0]+"'");
}
catch(PDNSException &AE) {
- L<<Logger::Error<<"Non-fatal error in control listener command '"<<line<<"': "<<AE.reason<<endl;
+ g_log<<Logger::Error<<"Non-fatal error in control listener command '"<<line<<"': "<<AE.reason<<endl;
}
catch(string &E) {
- L<<Logger::Error<<"Non-fatal error 2 in control listener command '"<<line<<"': "<<E<<endl;
+ g_log<<Logger::Error<<"Non-fatal error 2 in control listener command '"<<line<<"': "<<E<<endl;
}
catch(std::exception& e) {
- L<<Logger::Error<<"Non-fatal STL error in control listener command '"<<line<<"': "<<e.what()<<endl;
+ g_log<<Logger::Error<<"Non-fatal STL error in control listener command '"<<line<<"': "<<e.what()<<endl;
}
catch(...) {
- L<<Logger::Error<<"Non-fatal error in control listener command '"<<line<<"': unknown exception occurred"<<endl;
+ g_log<<Logger::Error<<"Non-fatal error in control listener command '"<<line<<"': unknown exception occurred"<<endl;
}
}
}
catch(PDNSException &AE) {
- L<<Logger::Error<<"Fatal error in control listener: "<<AE.reason<<endl;
+ g_log<<Logger::Error<<"Fatal error in control listener: "<<AE.reason<<endl;
}
catch(string &E) {
- L<<Logger::Error<<"Fatal error 2 in control listener: "<<E<<endl;
+ g_log<<Logger::Error<<"Fatal error 2 in control listener: "<<E<<endl;
}
catch(std::exception& e) {
- L<<Logger::Error<<"Fatal STL error in control listener: "<<e.what()<<endl;
+ g_log<<Logger::Error<<"Fatal STL error in control listener: "<<e.what()<<endl;
}
catch(...) {
- L<<Logger::Error<<"Fatal: unknown exception in control listener occurred"<<endl;
+ g_log<<Logger::Error<<"Fatal: unknown exception in control listener occurred"<<endl;
}
}
string tmp_mac;
GssContext gssctx(context);
if (!gssctx.valid()) {
- L<<Logger::Error<<"GSS context '"<<context<<"' is not valid"<<endl;
+ g_log<<Logger::Error<<"GSS context '"<<context<<"' is not valid"<<endl;
for(const string& error : gssctx.getErrorStrings()) {
- L<<Logger::Error<<"GSS error: "<<error<<endl;;
+ g_log<<Logger::Error<<"GSS error: "<<error<<endl;;
}
return false;
}
if (!gssctx.sign(message, tmp_mac)) {
- L<<Logger::Error<<"Could not sign message using GSS context '"<<context<<"'"<<endl;
+ g_log<<Logger::Error<<"Could not sign message using GSS context '"<<context<<"'"<<endl;
for(const string& error : gssctx.getErrorStrings()) {
- L<<Logger::Error<<"GSS error: "<<error<<endl;;
+ g_log<<Logger::Error<<"GSS error: "<<error<<endl;;
}
return false;
}
bool gss_verify_signature(const DNSName& context, const std::string& message, const std::string& mac) {
GssContext gssctx(context);
if (!gssctx.valid()) {
- L<<Logger::Error<<"GSS context '"<<context<<"' is not valid"<<endl;
+ g_log<<Logger::Error<<"GSS context '"<<context<<"' is not valid"<<endl;
for(const string& error : gssctx.getErrorStrings()) {
- L<<Logger::Error<<"GSS error: "<<error<<endl;;
+ g_log<<Logger::Error<<"GSS error: "<<error<<endl;;
}
return false;
}
if (!gssctx.verify(message, mac)) {
- L<<Logger::Error<<"Could not verify message using GSS context '"<<context<<"'"<<endl;
+ g_log<<Logger::Error<<"Could not verify message using GSS context '"<<context<<"'"<<endl;
for(const string& error : gssctx.getErrorStrings()) {
- L<<Logger::Error<<"GSS error: "<<error<<endl;;
+ g_log<<Logger::Error<<"GSS error: "<<error<<endl;;
}
return false;
}
#include "namespaces.hh"
pthread_once_t Logger::s_once;
-pthread_key_t Logger::s_loggerKey;
+pthread_key_t Logger::g_loggerKey;
-Logger &theL(const string &pname)
-{
- static Logger l("", LOG_DAEMON);
- if(!pname.empty())
- l.setName(pname);
- return l;
-}
+Logger g_log("", LOG_DAEMON);
void Logger::log(const string &msg, Urgency u)
{
void Logger::initKey()
{
- if(pthread_key_create(&s_loggerKey, perThreadDestructor))
+ if(pthread_key_create(&g_loggerKey, perThreadDestructor))
unixDie("Creating thread key for logger");
}
Logger::PerThread* Logger::getPerThread()
{
- void *buf=pthread_getspecific(s_loggerKey);
+ void *buf=pthread_getspecific(g_loggerKey);
PerThread* ret;
if(buf)
ret = (PerThread*) buf;
else {
ret = new PerThread();
- pthread_setspecific(s_loggerKey, (void*)ret);
+ pthread_setspecific(g_loggerKey, (void*)ret);
}
return ret;
}
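
Taken together, the changes above replace the per-call theL() accessor (and the L convenience macro) with one global Logger instance, g_log. A minimal usage sketch follows, assuming only what this diff shows: g_log constructed in logger.cc, declared extern in the header, and the urgency falling back to the default level after each endl. The header names are the project's own as seen elsewhere in this diff; the function name and message text are illustrative, not code from the tree.

    #include "logger.hh"       // assumed header declaring: extern Logger g_log;
    #include "namespaces.hh"   // pulls in std names such as endl, as the surrounding code does

    // Illustrative helper, not part of the patch.
    static void reportThreadCount(int n)
    {
      // Explicit urgency: this line is logged as a warning.
      g_log << Logger::Warning << "About to create " << n << " backend threads" << endl;
      // No explicit urgency: the default level (Info) applies again after the previous endl.
      g_log << "Thread creation finished" << endl;
    }
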
void resetFlags(){flags=0;open();} //!< zero the flags
/** Use this to stream to your log, like this:
\code
- L<<"This is an informational message"<<endl; // logged at default loglevel (Info)
- L<<Logger::Warning<<"Out of diskspace"<<endl; // Logged as a warning
- L<<"This is an informational message"<<endl; // logged AGAIN at default loglevel (Info)
+ g_log<<"This is an informational message"<<endl; // logged at default loglevel (Info)
+ g_log<<Logger::Warning<<"Out of diskspace"<<endl; // Logged as a warning
+ g_log<<"This is an informational message"<<endl; // logged AGAIN at default loglevel (Info)
\endcode
*/
Logger& operator<<(const char *s);
bool d_disableSyslog;
bool d_timestamps{true};
static pthread_once_t s_once;
- static pthread_key_t s_loggerKey;
+ static pthread_key_t g_loggerKey;
};
-extern Logger &theL(const string &pname="");
+extern Logger g_log;
#ifdef VERBOSELOG
#define DLOG(x) x
#else
-#undef L
#include "ext/luawrapper/include/LuaContext.hpp"
void AuthLua4::postPrepareContext() {
void BaseLua4::loadFile(const std::string &fname) {
std::ifstream ifs(fname);
if(!ifs) {
- theL()<<Logger::Error<<"Unable to read configuration file from '"<<fname<<"': "<<strerror(errno)<<endl;
+ g_log<<Logger::Error<<"Unable to read configuration file from '"<<fname<<"': "<<strerror(errno)<<endl;
return;
}
loadStream(ifs);
#else
-#undef L
#include "ext/luawrapper/include/LuaContext.hpp"
void BaseLua4::prepareContext() {
else
cas.insert(boost::get<ComboAddress>(in));
}
- catch(std::exception& e) { theL() <<Logger::Error<<e.what()<<endl; }
+ catch(std::exception& e) { g_log <<Logger::Error<<e.what()<<endl; }
});
d_lw->registerFunction<bool(cas_t::*)(const ComboAddress&)>("check",[](const cas_t& cas, const ComboAddress&ca) { return cas.count(ca)>0; });
d_lw->registerFunction<bool(ComboAddress::*)(const ComboAddress&)>("equal", [](const ComboAddress& lhs, const ComboAddress& rhs) { return ComboAddress::addressOnlyEqual()(lhs, rhs); });
d_lw->registerFunction<void(DNSRecord::*)(const std::string&)>("changeContent", [](DNSRecord& dr, const std::string& newContent) { dr.d_content = shared_ptr<DNSRecordContent>(DNSRecordContent::mastermake(dr.d_type, 1, newContent)); });
// pdnsload
- d_lw->writeFunction("pdnslog", [](const std::string& msg, boost::optional<int> loglevel) { theL() << (Logger::Urgency)loglevel.get_value_or(Logger::Warning) << msg<<endl; });
+ d_lw->writeFunction("pdnslog", [](const std::string& msg, boost::optional<int> loglevel) { g_log << (Logger::Urgency)loglevel.get_value_or(Logger::Warning) << msg<<endl; });
// certain constants
d_pd.push_back({"PASS", (int)PolicyDecision::PASS});
class LuaContext;
#if defined(HAVE_LUA)
-#undef L
#include "ext/luawrapper/include/LuaContext.hpp"
-#define L theL()
#endif
class BaseLua4 : public boost::noncopyable
}
}
catch(std::exception& e) {
- theL() <<Logger::Error<<e.what()<<endl;
+ g_log <<Logger::Error<<e.what()<<endl;
}
}
);
dq.udpAnswer = GenUDPQueryResponse(dq.udpQueryDest, dq.udpQuery);
auto cbFunc = d_lw->readVariable<boost::optional<luacall_t>>(dq.udpCallback).get_value_or(0);
if(!cbFunc) {
- theL()<<Logger::Error<<"Attempted callback for Lua UDP Query/Response which could not be found"<<endl;
+ g_log<<Logger::Error<<"Attempted callback for Lua UDP Query/Response which could not be found"<<endl;
return false;
}
bool result=cbFunc(&dq);
if(domain != mdp.d_qname) {
if(!mdp.d_qname.empty() && domain.toString().find((char)0) == string::npos /* ugly */) {// embedded nulls are too noisy, plus empty domains are too
- L<<Logger::Notice<<"Packet purporting to come from remote server "<<ip.toString()<<" contained wrong answer: '" << domain << "' != '" << mdp.d_qname << "'" << endl;
+ g_log<<Logger::Notice<<"Packet purporting to come from remote server "<<ip.toString()<<" contained wrong answer: '" << domain << "' != '" << mdp.d_qname << "'" << endl;
}
// unexpected count has already been done @ pdns_recursor.cc
goto out;
}
catch(std::exception &mde) {
if(::arg().mustDo("log-common-errors"))
- L<<Logger::Notice<<"Unable to parse packet from remote server "<<ip.toString()<<": "<<mde.what()<<endl;
+ g_log<<Logger::Notice<<"Unable to parse packet from remote server "<<ip.toString()<<": "<<mde.what()<<endl;
lwr->d_rcode = RCode::FormErr;
g_stats.serverParseError++;
#ifdef HAVE_PROTOBUF
return 1; // success - oddly enough
}
catch(...) {
- L<<Logger::Notice<<"Unknown error parsing packet from remote server"<<endl;
+ g_log<<Logger::Notice<<"Unknown error parsing packet from remote server"<<endl;
}
g_stats.serverParseError++;
for(set<string>::const_iterator j=nsset.begin();j!=nsset.end();++j) {
vector<string> nsips=fns.lookup(DNSName(*j), B);
if(nsips.empty())
- L<<Logger::Warning<<"Unable to queue notification of domain '"<<di.zone<<"': nameservers do not resolve!"<<endl;
+ g_log<<Logger::Warning<<"Unable to queue notification of domain '"<<di.zone<<"': nameservers do not resolve!"<<endl;
else
for(vector<string>::const_iterator k=nsips.begin();k!=nsips.end();++k) {
const ComboAddress caIp(*k, 53);
if(!d_preventSelfNotification || !AddressIsUs(caIp)) {
if(!d_onlyNotify.match(&caIp))
- L<<Logger::Info<<"Skipped notification of domain '"<<di.zone<<"' to "<<*j<<" because it does not match only-notify."<<endl;
+ g_log<<Logger::Info<<"Skipped notification of domain '"<<di.zone<<"' to "<<*j<<" because it does not match only-notify."<<endl;
else
ips.insert(caIp.toStringWithPort());
}
}
for(set<string>::const_iterator j=ips.begin();j!=ips.end();++j) {
- L<<Logger::Warning<<"Queued notification of domain '"<<di.zone<<"' to "<<*j<<endl;
+ g_log<<Logger::Warning<<"Queued notification of domain '"<<di.zone<<"' to "<<*j<<endl;
d_nq.add(di.zone,*j);
hasQueuedItem=true;
}
for(set<string>::const_iterator j=alsoNotify.begin();j!=alsoNotify.end();++j) {
try {
const ComboAddress caIp(*j, 53);
- L<<Logger::Warning<<"Queued also-notification of domain '"<<di.zone<<"' to "<<caIp.toStringWithPort()<<endl;
+ g_log<<Logger::Warning<<"Queued also-notification of domain '"<<di.zone<<"' to "<<caIp.toStringWithPort()<<endl;
if (!ips.count(caIp.toStringWithPort())) {
ips.insert(caIp.toStringWithPort());
d_nq.add(di.zone, caIp.toStringWithPort());
hasQueuedItem=true;
}
catch(PDNSException &e) {
- L<<Logger::Warning<<"Unparseable IP in ALSO-NOTIFY metadata of domain '"<<di.zone<<"'. Warning: "<<e.reason<<endl;
+ g_log<<Logger::Warning<<"Unparseable IP in ALSO-NOTIFY metadata of domain '"<<di.zone<<"'. Warning: "<<e.reason<<endl;
}
}
if (!hasQueuedItem)
- L<<Logger::Warning<<"Request to queue notification for domain '"<<di.zone<<"' was processed, but no valid nameservers or ALSO-NOTIFYs found. Not notifying!"<<endl;
+ g_log<<Logger::Warning<<"Request to queue notification for domain '"<<di.zone<<"' was processed, but no valid nameservers or ALSO-NOTIFYs found. Not notifying!"<<endl;
}
DomainInfo di;
UeberBackend B;
if(!B.getDomainInfo(domain, di)) {
- L<<Logger::Error<<"No such domain '"<<domain<<"' in our database"<<endl;
+ g_log<<Logger::Error<<"No such domain '"<<domain<<"' in our database"<<endl;
return false;
}
queueNotifyDomain(di, &B);
if(cmdomains.empty()) {
if(d_masterschanged)
- L<<Logger::Warning<<"No master domains need notifications"<<endl;
+ g_log<<Logger::Warning<<"No master domains need notifications"<<endl;
d_masterschanged=false;
}
else {
d_masterschanged=true;
- L<<Logger::Error<<cmdomains.size()<<" domain"<<(cmdomains.size()>1 ? "s" : "")<<" for which we are master need"<<
+ g_log<<Logger::Error<<cmdomains.size()<<" domain"<<(cmdomains.size()>1 ? "s" : "")<<" for which we are master need"<<
(cmdomains.size()>1 ? "" : "s")<<
" notifications"<<endl;
}
p.setRemote(&from);
if(p.parse(buffer,(size_t)size)<0) {
- L<<Logger::Warning<<"Unable to parse SOA notification answer from "<<p.getRemote()<<endl;
+ g_log<<Logger::Warning<<"Unable to parse SOA notification answer from "<<p.getRemote()<<endl;
continue;
}
if(p.d.rcode)
- L<<Logger::Warning<<"Received unsuccessful notification report for '"<<p.qdomain<<"' from "<<from.toStringWithPort()<<", error: "<<RCode::to_s(p.d.rcode)<<endl;
+ g_log<<Logger::Warning<<"Received unsuccessful notification report for '"<<p.qdomain<<"' from "<<from.toStringWithPort()<<", error: "<<RCode::to_s(p.d.rcode)<<endl;
if(d_nq.removeIf(from.toStringWithPort(), p.d.id, p.qdomain))
- L<<Logger::Warning<<"Removed from notification list: '"<<p.qdomain<<"' to "<<from.toStringWithPort()<<" "<< (p.d.rcode ? RCode::to_s(p.d.rcode) : "(was acknowledged)")<<endl;
+ g_log<<Logger::Warning<<"Removed from notification list: '"<<p.qdomain<<"' to "<<from.toStringWithPort()<<" "<< (p.d.rcode ? RCode::to_s(p.d.rcode) : "(was acknowledged)")<<endl;
else {
- L<<Logger::Warning<<"Received spurious notify answer for '"<<p.qdomain<<"' from "<< from.toStringWithPort()<<endl;
+ g_log<<Logger::Warning<<"Received spurious notify answer for '"<<p.qdomain<<"' from "<< from.toStringWithPort()<<endl;
//d_nq.dump();
}
}
ComboAddress remote(ip, 53); // default to 53
if((d_nsock6 < 0 && remote.sin4.sin_family == AF_INET6) ||
(d_nsock4 < 0 && remote.sin4.sin_family == AF_INET)) {
- L<<Logger::Warning<<"Unable to notify "<<remote.toStringWithPort()<<" for domain '"<<domain<<"', address family is disabled. Is query-local-address"<<(remote.sin4.sin_family == AF_INET ? "" : "6")<<" unset?"<<endl;
+ g_log<<Logger::Warning<<"Unable to notify "<<remote.toStringWithPort()<<" for domain '"<<domain<<"', address family is disabled. Is query-local-address"<<(remote.sin4.sin_family == AF_INET ? "" : "6")<<" unset?"<<endl;
d_nq.removeIf(remote.toStringWithPort(), id, domain); // Remove, we'll never be able to notify
continue; // don't try to notify what we can't!
}
drillHole(domain, ip);
}
catch(ResolverException &re) {
- L<<Logger::Error<<"Error trying to resolve '"<<ip<<"' for notifying '"<<domain<<"' to server: "<<re.reason<<endl;
+ g_log<<Logger::Error<<"Error trying to resolve '"<<ip<<"' for notifying '"<<domain<<"' to server: "<<re.reason<<endl;
}
}
else
- L<<Logger::Error<<"Notification for "<<domain<<" to "<<ip<<" failed after retries"<<endl;
+ g_log<<Logger::Error<<"Notification for "<<domain<<" to "<<ip<<" failed after retries"<<endl;
}
return d_nq.earliest();
if (tsigkeyname.empty() == false) {
if (!B.getTSIGKey(tsigkeyname, &tsigalgorithm, &tsigsecret64)) {
- L<<Logger::Error<<"TSIG key '"<<tsigkeyname<<"' for domain '"<<domain<<"' not found"<<endl;
+ g_log<<Logger::Error<<"TSIG key '"<<tsigkeyname<<"' for domain '"<<domain<<"' not found"<<endl;
return;
}
TSIGRecordContent trc;
trc.d_origID=ntohs(id);
trc.d_eRcode=0;
if (B64Decode(tsigsecret64, tsigsecret) == -1) {
- L<<Logger::Error<<"Unable to Base-64 decode TSIG key '"<<tsigkeyname<<"' for domain '"<<domain<<"'"<<endl;
+ g_log<<Logger::Error<<"Unable to Base-64 decode TSIG key '"<<tsigkeyname<<"' for domain '"<<domain<<"'"<<endl;
return;
}
addTSIG(pw, trc, tsigkeyname, tsigsecret, "", false);
s=socket(AF_INET,SOCK_DGRAM,0);
if(s<0) {
- L<<Logger::Error<<"Unable to acquire UDP socket: "+string(strerror(errno)) << endl;
+ g_log<<Logger::Error<<"Unable to acquire UDP socket: "+string(strerror(errno)) << endl;
throw PDNSException("Unable to acquire a UDP socket: "+string(strerror(errno)));
}
setsockopt(s, IPPROTO_IP, GEN_IP_PKTINFO, &one, sizeof(one));
if (!setSocketTimestamps(s))
- L<<Logger::Warning<<"Unable to enable timestamp reporting for socket"<<endl;
+ g_log<<Logger::Warning<<"Unable to enable timestamp reporting for socket"<<endl;
#ifdef SO_REUSEPORT
if( d_can_reuseport )
string binderror = strerror(errno);
close(s);
if( errno == EADDRNOTAVAIL && ! ::arg().mustDo("local-address-nonexist-fail") ) {
- L<<Logger::Error<<"IPv4 Address " << localname << " does not exist on this server - skipping UDP bind" << endl;
+ g_log<<Logger::Error<<"IPv4 Address " << localname << " does not exist on this server - skipping UDP bind" << endl;
continue;
} else {
- L<<Logger::Error<<"Unable to bind UDP socket to '"+locala.toStringWithPort()+"': "<<binderror<<endl;
+ g_log<<Logger::Error<<"Unable to bind UDP socket to '"+locala.toStringWithPort()+"': "<<binderror<<endl;
throw PDNSException("Unable to bind to UDP socket");
}
}
d_sockets.push_back(s);
- L<<Logger::Error<<"UDP server bound to "<<locala.toStringWithPort()<<endl;
+ g_log<<Logger::Error<<"UDP server bound to "<<locala.toStringWithPort()<<endl;
struct pollfd pfd;
pfd.fd = s;
pfd.events = POLLIN;
s=socket(AF_INET6,SOCK_DGRAM,0);
if(s<0) {
if( errno == EAFNOSUPPORT ) {
- L<<Logger::Error<<"IPv6 Address Family is not supported - skipping UDPv6 bind" << endl;
+ g_log<<Logger::Error<<"IPv6 Address Family is not supported - skipping UDPv6 bind" << endl;
return;
} else {
- L<<Logger::Error<<"Unable to acquire a UDPv6 socket: "+string(strerror(errno)) << endl;
+ g_log<<Logger::Error<<"Unable to acquire a UDPv6 socket: "+string(strerror(errno)) << endl;
throw PDNSException("Unable to acquire a UDPv6 socket: "+string(strerror(errno)));
}
}
}
if (!setSocketTimestamps(s))
- L<<Logger::Warning<<"Unable to enable timestamp reporting for socket"<<endl;
+ g_log<<Logger::Warning<<"Unable to enable timestamp reporting for socket"<<endl;
#ifdef SO_REUSEPORT
if( d_can_reuseport )
if(::bind(s, (sockaddr*)&locala, sizeof(locala))<0) {
close(s);
if( errno == EADDRNOTAVAIL && ! ::arg().mustDo("local-ipv6-nonexist-fail") ) {
- L<<Logger::Error<<"IPv6 Address " << localname << " does not exist on this server - skipping UDP bind" << endl;
+ g_log<<Logger::Error<<"IPv6 Address " << localname << " does not exist on this server - skipping UDP bind" << endl;
continue;
} else {
- L<<Logger::Error<<"Unable to bind to UDPv6 socket "<< localname <<": "<<strerror(errno)<<endl;
+ g_log<<Logger::Error<<"Unable to bind to UDPv6 socket "<< localname <<": "<<strerror(errno)<<endl;
throw PDNSException("Unable to bind to UDPv6 socket");
}
}
pfd.events = POLLIN;
pfd.revents = 0;
d_rfds.push_back(pfd);
- L<<Logger::Error<<"UDPv6 server bound to "<<locala.toStringWithPort()<<endl;
+ g_log<<Logger::Error<<"UDPv6 server bound to "<<locala.toStringWithPort()<<endl;
}
}
bindIPv6();
if(::arg()["local-address"].empty() && ::arg()["local-ipv6"].empty())
- L<<Logger::Critical<<"PDNS is deaf and mute! Not listening on any interfaces"<<endl;
+ g_log<<Logger::Critical<<"PDNS is deaf and mute! Not listening on any interfaces"<<endl;
}
void UDPNameserver::send(DNSPacket *p)
if(p->d_anyLocal) {
addCMsgSrcAddr(&msgh, cbuf, p->d_anyLocal.get_ptr(), 0);
}
- DLOG(L<<Logger::Notice<<"Sending a packet to "<< p->getRemote() <<" ("<< buffer.length()<<" octets)"<<endl);
+ DLOG(g_log<<Logger::Notice<<"Sending a packet to "<< p->getRemote() <<" ("<< buffer.length()<<" octets)"<<endl);
if(buffer.length() > p->getMaxReplyLen()) {
- L<<Logger::Error<<"Weird, trying to send a message that needs truncation, "<< buffer.length()<<" > "<<p->getMaxReplyLen()<<endl;
+ g_log<<Logger::Error<<"Weird, trying to send a message that needs truncation, "<< buffer.length()<<" > "<<p->getMaxReplyLen()<<endl;
}
if(sendmsg(p->getSocket(), &msgh, 0) < 0)
- L<<Logger::Error<<"Error sending reply with sendmsg (socket="<<p->getSocket()<<", dest="<<p->d_remote.toStringWithPort()<<"): "<<strerror(errno)<<endl;
+ g_log<<Logger::Error<<"Error sending reply with sendmsg (socket="<<p->getSocket()<<", dest="<<p->d_remote.toStringWithPort()<<"): "<<strerror(errno)<<endl;
}
DNSPacket *UDPNameserver::receive(DNSPacket *prefilled, std::string& buffer)
sock=pfd.fd;
if((len=recvmsg(sock, &msgh, 0)) < 0 ) {
if(errno != EAGAIN)
- L<<Logger::Error<<"recvfrom gave error, ignoring: "<<strerror(errno)<<endl;
+ g_log<<Logger::Error<<"recvfrom gave error, ignoring: "<<strerror(errno)<<endl;
return 0;
}
break;
if(sock==-1)
throw PDNSException("poll betrayed us! (should not happen)");
- DLOG(L<<"Received a packet " << len <<" bytes long from "<< remote.toString()<<endl);
+ DLOG(g_log<<"Received a packet " << len <<" bytes long from "<< remote.toString()<<endl);
BOOST_STATIC_ASSERT(offsetof(sockaddr_in, sin_port) == offsetof(sockaddr_in6, sin6_port));
#include <bitset>
#include "dnsparser.hh"
#include "iputils.hh"
-#undef L
#include <boost/program_options.hpp>
#include <boost/format.hpp>
#include <bitset>
#include "dnsparser.hh"
#include "iputils.hh"
-#undef L
#include <boost/program_options.hpp>
#include <boost/format.hpp>
PacketHandler::~PacketHandler()
{
--s_count;
- DLOG(L<<Logger::Error<<"PacketHandler destructor called - "<<s_count<<" left"<<endl);
+ DLOG(g_log<<Logger::Error<<"PacketHandler destructor called - "<<s_count<<" left"<<endl);
}
/**
DNSName prefix;
DNSName subdomain(target);
do {
- DLOG(L<<"Attempting DNAME lookup for "<<subdomain<<", sd.qname="<<sd.qname<<endl);
+ DLOG(g_log<<"Attempting DNAME lookup for "<<subdomain<<", sd.qname="<<sd.qname<<endl);
B.lookup(QType(QType::DNAME), subdomain, p, sd.domain_id);
while(B.get(rr)) {
B.lookup(QType(QType::ANY), subdomain, p, sd.domain_id);
if (B.get(rr)) {
- DLOG(L<<"No wildcard match, ancestor exists"<<endl);
+ DLOG(g_log<<"No wildcard match, ancestor exists"<<endl);
while (B.get(rr)) ;
break;
}
if(rr.dr.d_type != QType::A && rr.dr.d_type!=QType::AAAA)
continue;
if(rr.domain_id!=i->domain_id && ::arg()["out-of-zone-additional-processing"]=="no") {
- DLOG(L<<Logger::Warning<<"Not including out-of-zone additional processing of "<<i->dr.d_name<<" ("<<rr.dr.d_name<<")"<<endl);
+ DLOG(g_log<<Logger::Warning<<"Not including out-of-zone additional processing of "<<i->dr.d_name<<" ("<<rr.dr.d_name<<")"<<endl);
continue; // not adding out-of-zone additional data
}
void PacketHandler::addNSEC3(DNSPacket *p, DNSPacket *r, const DNSName& target, const DNSName& wildcard, const DNSName& auth, const NSEC3PARAMRecordContent& ns3rc, bool narrow, int mode)
{
- DLOG(L<<"addNSEC3() mode="<<mode<<" auth="<<auth<<" target="<<target<<" wildcard="<<wildcard<<endl);
+ DLOG(g_log<<"addNSEC3() mode="<<mode<<" auth="<<auth<<" target="<<target<<" wildcard="<<wildcard<<endl);
SOAData sd;
if(!B.getSOAUncached(auth, sd)) {
- DLOG(L<<"Could not get SOA for domain");
+ DLOG(g_log<<"Could not get SOA for domain");
return;
}
if (mode != 3) {
unhashed=(mode == 0 || mode == 1 || mode == 5) ? target : closest;
hashed=hashQNameWithSalt(ns3rc, unhashed);
- DLOG(L<<"1 hash: "<<toBase32Hex(hashed)<<" "<<unhashed<<endl);
+ DLOG(g_log<<"1 hash: "<<toBase32Hex(hashed)<<" "<<unhashed<<endl);
getNSEC3Hashes(narrow, sd.db, sd.domain_id, hashed, false, unhashed, before, after, mode);
if (((mode == 0 && ns3rc.d_flags) || mode == 1) && (hashed != before)) {
- DLOG(L<<"No matching NSEC3, do closest (provable) encloser"<<endl);
+ DLOG(g_log<<"No matching NSEC3, do closest (provable) encloser"<<endl);
bool doBreak = false;
DNSZoneRecord rr;
doNextcloser = true;
unhashed=closest;
hashed=hashQNameWithSalt(ns3rc, unhashed);
- DLOG(L<<"1 hash: "<<toBase32Hex(hashed)<<" "<<unhashed<<endl);
+ DLOG(g_log<<"1 hash: "<<toBase32Hex(hashed)<<" "<<unhashed<<endl);
getNSEC3Hashes(narrow, sd.db, sd.domain_id, hashed, false, unhashed, before, after);
}
if (!after.empty()) {
- DLOG(L<<"Done calling for matching, hashed: '"<<toBase32Hex(hashed)<<"' before='"<<toBase32Hex(before)<<"', after='"<<toBase32Hex(after)<<"'"<<endl);
+ DLOG(g_log<<"Done calling for matching, hashed: '"<<toBase32Hex(hashed)<<"' before='"<<toBase32Hex(before)<<"', after='"<<toBase32Hex(after)<<"'"<<endl);
emitNSEC3(r, sd, ns3rc, unhashed, before, after, mode);
}
}
while( next.chopOff() && !(next==closest));
hashed=hashQNameWithSalt(ns3rc, unhashed);
- DLOG(L<<"2 hash: "<<toBase32Hex(hashed)<<" "<<unhashed<<endl);
+ DLOG(g_log<<"2 hash: "<<toBase32Hex(hashed)<<" "<<unhashed<<endl);
getNSEC3Hashes(narrow, sd.db,sd.domain_id, hashed, true, unhashed, before, after);
- DLOG(L<<"Done calling for covering, hashed: '"<<toBase32Hex(hashed)<<"' before='"<<toBase32Hex(before)<<"', after='"<<toBase32Hex(after)<<"'"<<endl);
+ DLOG(g_log<<"Done calling for covering, hashed: '"<<toBase32Hex(hashed)<<"' before='"<<toBase32Hex(before)<<"', after='"<<toBase32Hex(after)<<"'"<<endl);
emitNSEC3( r, sd, ns3rc, unhashed, before, after, mode);
}
unhashed=g_wildcarddnsname+closest;
hashed=hashQNameWithSalt(ns3rc, unhashed);
- DLOG(L<<"3 hash: "<<toBase32Hex(hashed)<<" "<<unhashed<<endl);
+ DLOG(g_log<<"3 hash: "<<toBase32Hex(hashed)<<" "<<unhashed<<endl);
getNSEC3Hashes(narrow, sd.db, sd.domain_id, hashed, (mode != 2), unhashed, before, after);
- DLOG(L<<"Done calling for '*', hashed: '"<<toBase32Hex(hashed)<<"' before='"<<toBase32Hex(before)<<"', after='"<<toBase32Hex(after)<<"'"<<endl);
+ DLOG(g_log<<"Done calling for '*', hashed: '"<<toBase32Hex(hashed)<<"' before='"<<toBase32Hex(before)<<"', after='"<<toBase32Hex(after)<<"'"<<endl);
emitNSEC3( r, sd, ns3rc, unhashed, before, after, mode);
}
}
void PacketHandler::addNSEC(DNSPacket *p, DNSPacket *r, const DNSName& target, const DNSName& wildcard, const DNSName& auth, int mode)
{
- DLOG(L<<"addNSEC() mode="<<mode<<" auth="<<auth<<" target="<<target<<" wildcard="<<wildcard<<endl);
+ DLOG(g_log<<"addNSEC() mode="<<mode<<" auth="<<auth<<" target="<<target<<" wildcard="<<wildcard<<endl);
SOAData sd;
if(!B.getSOAUncached(auth, sd)) {
- DLOG(L<<"Could not get SOA for domain"<<endl);
+ DLOG(g_log<<"Could not get SOA for domain"<<endl);
return;
}
resolver.resolve(remote, p->qdomain, QType::NS, &nsset);
}
catch(ResolverException &re) {
- L<<Logger::Error<<"Error resolving SOA or NS for "<<p->qdomain<<" at: "<< remote <<": "<<re.reason<<endl;
+ g_log<<Logger::Error<<"Error resolving SOA or NS for "<<p->qdomain<<" at: "<< remote <<": "<<re.reason<<endl;
return RCode::ServFail;
}
}
if(!haveNS) {
- L<<Logger::Error<<"While checking for supermaster, did not find NS for "<<p->qdomain<<" at: "<< remote <<endl;
+ g_log<<Logger::Error<<"While checking for supermaster, did not find NS for "<<p->qdomain<<" at: "<< remote <<endl;
return RCode::ServFail;
}
DNSBackend *db;
if (!::arg().mustDo("allow-unsigned-supermaster") && tsigkeyname.empty()) {
- L<<Logger::Error<<"Received unsigned NOTIFY for "<<p->qdomain<<" from potential supermaster "<<remote<<". Refusing."<<endl;
+ g_log<<Logger::Error<<"Received unsigned NOTIFY for "<<p->qdomain<<" from potential supermaster "<<remote<<". Refusing."<<endl;
return RCode::Refused;
}
if(!B.superMasterBackend(remote, p->qdomain, nsset, &nameserver, &account, &db)) {
- L<<Logger::Error<<"Unable to find backend willing to host "<<p->qdomain<<" for potential supermaster "<<remote<<". Remote nameservers: "<<endl;
+ g_log<<Logger::Error<<"Unable to find backend willing to host "<<p->qdomain<<" for potential supermaster "<<remote<<". Remote nameservers: "<<endl;
for(const auto& rr: nsset) {
if(rr.qtype==QType::NS)
- L<<Logger::Error<<rr.content<<endl;
+ g_log<<Logger::Error<<rr.content<<endl;
}
return RCode::Refused;
}
}
}
catch(PDNSException& ae) {
- L<<Logger::Error<<"Database error trying to create "<<p->qdomain<<" for potential supermaster "<<remote<<": "<<ae.reason<<endl;
+ g_log<<Logger::Error<<"Database error trying to create "<<p->qdomain<<" for potential supermaster "<<remote<<": "<<ae.reason<<endl;
return RCode::ServFail;
}
- L<<Logger::Warning<<"Created new slave zone '"<<p->qdomain<<"' from supermaster "<<remote<<endl;
+ g_log<<Logger::Warning<<"Created new slave zone '"<<p->qdomain<<"' from supermaster "<<remote<<endl;
return RCode::NoError;
}
vector<string> meta;
if(!::arg().mustDo("slave") && s_forwardNotify.empty()) {
- L<<Logger::Error<<"Received NOTIFY for "<<p->qdomain<<" from "<<p->getRemote()<<" but slave support is disabled in the configuration"<<endl;
+ g_log<<Logger::Error<<"Received NOTIFY for "<<p->qdomain<<" from "<<p->getRemote()<<" but slave support is disabled in the configuration"<<endl;
return RCode::NotImp;
}
if(!s_allowNotifyFrom.match((ComboAddress *) &p->d_remote ) || p->d_havetsig) {
if (p->d_havetsig && p->getTSIGKeyname().empty() == false) {
- L<<Logger::Notice<<"Received secure NOTIFY for "<<p->qdomain<<" from "<<p->getRemote()<<", allowed by TSIG key '"<<p->getTSIGKeyname()<<"'"<<endl;
+ g_log<<Logger::Notice<<"Received secure NOTIFY for "<<p->qdomain<<" from "<<p->getRemote()<<", allowed by TSIG key '"<<p->getTSIGKeyname()<<"'"<<endl;
} else {
- L<<Logger::Error<<"Received NOTIFY for "<<p->qdomain<<" from "<<p->getRemote()<<" but remote is not permitted by TSIG or allow-notify-from"<<endl;
+ g_log<<Logger::Error<<"Received NOTIFY for "<<p->qdomain<<" from "<<p->getRemote()<<" but remote is not permitted by TSIG or allow-notify-from"<<endl;
return RCode::Refused;
}
}
DomainInfo di;
di.serial = 0;
if(!B.getDomainInfo(p->qdomain, di) || !(db=di.backend)) {
- L<<Logger::Error<<"Received NOTIFY for "<<p->qdomain<<" from "<<p->getRemote()<<" for which we are not authoritative"<<endl;
+ g_log<<Logger::Error<<"Received NOTIFY for "<<p->qdomain<<" from "<<p->getRemote()<<" for which we are not authoritative"<<endl;
return trySuperMaster(p, p->getTSIGKeyname());
}
if (B.getDomainMetadata(p->qdomain,"AXFR-MASTER-TSIG",meta) && meta.size() > 0) {
if (!p->d_havetsig) {
if (::arg().mustDo("allow-unsigned-notify")) {
- L<<Logger::Warning<<"Received unsigned NOTIFY for "<<p->qdomain<<" from "<<p->getRemote()<<": permitted because allow-unsigned-notify";
+ g_log<<Logger::Warning<<"Received unsigned NOTIFY for "<<p->qdomain<<" from "<<p->getRemote()<<": permitted because allow-unsigned-notify";
} else {
- L<<Logger::Warning<<"Received unsigned NOTIFY for "<<p->qdomain<<" from "<<p->getRemote()<<": refused"<<endl;
+ g_log<<Logger::Warning<<"Received unsigned NOTIFY for "<<p->qdomain<<" from "<<p->getRemote()<<": refused"<<endl;
return RCode::Refused;
}
} else if (meta[0] != p->getTSIGKeyname().toStringNoDot()) {
- L<<Logger::Error<<"Received secure NOTIFY for "<<p->qdomain<<" from "<<p->getRemote()<<": expected TSIG key '"<<meta[0]<<", got '"<<p->getTSIGKeyname()<<"'"<<endl;
+ g_log<<Logger::Error<<"Received secure NOTIFY for "<<p->qdomain<<" from "<<p->getRemote()<<": expected TSIG key '"<<meta[0]<<", got '"<<p->getTSIGKeyname()<<"'"<<endl;
return RCode::Refused;
}
}
if(::arg().contains("trusted-notification-proxy", p->getRemote().toString())) {
- L<<Logger::Error<<"Received NOTIFY for "<<p->qdomain<<" from trusted-notification-proxy "<< p->getRemote()<<endl;
+ g_log<<Logger::Error<<"Received NOTIFY for "<<p->qdomain<<" from trusted-notification-proxy "<< p->getRemote()<<endl;
if(di.masters.empty()) {
- L<<Logger::Error<<"However, "<<p->qdomain<<" does not have any masters defined"<<endl;
+ g_log<<Logger::Error<<"However, "<<p->qdomain<<" does not have any masters defined"<<endl;
return RCode::Refused;
}
}
else if(::arg().mustDo("master") && di.kind == DomainInfo::Master) {
- L<<Logger::Error<<"Received NOTIFY for "<<p->qdomain<<" from "<<p->getRemote()<<" but we are master, rejecting"<<endl;
+ g_log<<Logger::Error<<"Received NOTIFY for "<<p->qdomain<<" from "<<p->getRemote()<<" but we are master, rejecting"<<endl;
return RCode::Refused;
}
else if(!db->isMaster(p->qdomain, p->getRemote().toString())) {
- L<<Logger::Error<<"Received NOTIFY for "<<p->qdomain<<" from "<<p->getRemote()<<" which is not a master"<<endl;
+ g_log<<Logger::Error<<"Received NOTIFY for "<<p->qdomain<<" from "<<p->getRemote()<<" which is not a master"<<endl;
return RCode::Refused;
}
if(!s_forwardNotify.empty()) {
set<string> forwardNotify(s_forwardNotify);
for(set<string>::const_iterator j=forwardNotify.begin();j!=forwardNotify.end();++j) {
- L<<Logger::Warning<<"Relaying notification of domain "<<p->qdomain<<" from "<<p->getRemote()<<" to "<<*j<<endl;
+ g_log<<Logger::Warning<<"Relaying notification of domain "<<p->qdomain<<" from "<<p->getRemote()<<" to "<<*j<<endl;
Communicator.notify(p->qdomain,*j);
}
}
{
if(!d_doDNAME)
return false;
- DLOG(L<<Logger::Warning<<"Let's try DNAME.."<<endl);
+ DLOG(g_log<<Logger::Warning<<"Let's try DNAME.."<<endl);
vector<DNSZoneRecord> rrset = getBestDNAMESynth(p, sd, target);
if(!rrset.empty()) {
for(auto& rr: rrset) {
return false;
if(rrset.empty()) {
- DLOG(L<<"Wildcard matched something, but not of the correct type"<<endl);
+ DLOG(g_log<<"Wildcard matched something, but not of the correct type"<<endl);
nodata=true;
}
else {
if(p->d.qr) { // QR bit from dns packet (thanks RA from N)
if(d_logDNSDetails)
- L<<Logger::Error<<"Received an answer (non-query) packet from "<<p->getRemote()<<", dropping"<<endl;
+ g_log<<Logger::Error<<"Received an answer (non-query) packet from "<<p->getRemote()<<", dropping"<<endl;
S.inc("corrupt-packets");
S.ringAccount("remotes-corrupt", p->d_remote);
return 0;
if(p->d.tc) { // truncated query. MOADNSParser would silently parse this packet in an incomplete way.
if(d_logDNSDetails)
- L<<Logger::Error<<"Received truncated query packet from "<<p->getRemote()<<", dropping"<<endl;
+ g_log<<Logger::Error<<"Received truncated query packet from "<<p->getRemote()<<", dropping"<<endl;
S.inc("corrupt-packets");
S.ringAccount("remotes-corrupt", p->d_remote);
return 0;
if(!p->checkForCorrectTSIG(&B, &keyname, &secret, &trc)) {
r=p->replyPacket(); // generate an empty reply packet
if(d_logDNSDetails)
- L<<Logger::Error<<"Received a TSIG signed message with a non-validating key"<<endl;
+ g_log<<Logger::Error<<"Received a TSIG signed message with a non-validating key"<<endl;
// RFC3007 describes that a non-secure message should be sending Refused for DNS Updates
if (p->d.opcode == Opcode::Update)
r->setRcode(RCode::Refused);
if (p->d_tsig_algo == TSIG_GSS) {
GssContext gssctx(keyname);
if (!gssctx.getPeerPrincipal(p->d_peer_principal)) {
- L<<Logger::Warning<<"Failed to extract peer principal from GSS context with keyname '"<<keyname<<"'"<<endl;
+ g_log<<Logger::Warning<<"Failed to extract peer principal from GSS context with keyname '"<<keyname<<"'"<<endl;
}
}
}
if(!validDNSName(p->qdomain)) {
if(d_logDNSDetails)
- L<<Logger::Error<<"Received a malformed qdomain from "<<p->getRemote()<<", '"<<p->qdomain<<"': sending servfail"<<endl;
+ g_log<<Logger::Error<<"Received a malformed qdomain from "<<p->getRemote()<<", '"<<p->qdomain<<"': sending servfail"<<endl;
S.inc("corrupt-packets");
S.ringAccount("remotes-corrupt", p->d_remote);
S.inc("servfail-packets");
return 0;
}
- L<<Logger::Error<<"Received an unknown opcode "<<p->d.opcode<<" from "<<p->getRemote()<<" for "<<p->qdomain<<endl;
+ g_log<<Logger::Error<<"Received an unknown opcode "<<p->d.opcode<<" from "<<p->getRemote()<<" for "<<p->qdomain<<endl;
r->setRcode(RCode::NotImp);
return r;
}
- // L<<Logger::Warning<<"Query for '"<<p->qdomain<<"' "<<p->qtype.getName()<<" from "<<p->getRemote()<< " (tcp="<<p->d_tcp<<")"<<endl;
+ // g_log<<Logger::Warning<<"Query for '"<<p->qdomain<<"' "<<p->qtype.getName()<<" from "<<p->getRemote()<< " (tcp="<<p->d_tcp<<")"<<endl;
if(p->qtype.getCode()==QType::IXFR) {
r->setRcode(RCode::Refused);
retargeted:;
if(retargetcount > 10) { // XXX FIXME, retargetcount++?
- L<<Logger::Warning<<"Abort CNAME chain resolution after "<<--retargetcount<<" redirects, sending out servfail. Initial query: '"<<p->qdomain<<"'"<<endl;
+ g_log<<Logger::Warning<<"Abort CNAME chain resolution after "<<--retargetcount<<" redirects, sending out servfail. Initial query: '"<<p->qdomain<<"'"<<endl;
delete r;
r=p->replyPacket();
r->setRcode(RCode::ServFail);
}
if(!B.getAuth(target, p->qtype, &sd)) {
- DLOG(L<<Logger::Error<<"We have no authority over zone '"<<target<<"'"<<endl);
+ DLOG(g_log<<Logger::Error<<"We have no authority over zone '"<<target<<"'"<<endl);
if(!retargetcount) {
r->setA(false); // drop AA if we never had a SOA in the first place
r->setRcode(RCode::Refused); // send REFUSED - but only on empty 'no idea'
}
goto sendit;
}
- DLOG(L<<Logger::Error<<"We have authority, zone='"<<sd.qname<<"', id="<<sd.domain_id<<endl);
+ DLOG(g_log<<Logger::Error<<"We have authority, zone='"<<sd.qname<<"', id="<<sd.domain_id<<endl);
authSet.insert(sd.qname);
if(!retargetcount) r->qdomainzone=sd.qname;
// this TRUMPS a cname!
if(p->qtype.getCode() == QType::RRSIG) {
- L<<Logger::Info<<"Direct RRSIG query for "<<target<<" from "<<p->getRemote()<<endl;
+ g_log<<Logger::Info<<"Direct RRSIG query for "<<target<<" from "<<p->getRemote()<<endl;
r->setRcode(RCode::Refused);
goto sendit;
}
- DLOG(L<<"Checking for referrals first, unless this is a DS query"<<endl);
+ DLOG(g_log<<"Checking for referrals first, unless this is a DS query"<<endl);
if(p->qtype.getCode() != QType::DS && tryReferral(p, r, sd, target, retargetcount))
goto sendit;
- DLOG(L<<"Got no referrals, trying ANY"<<endl);
+ DLOG(g_log<<"Got no referrals, trying ANY"<<endl);
// see what we get..
B.lookup(QType(QType::ANY), target, p, sd.domain_id);
if(DP && rr.dr.d_type == QType::ALIAS && (p->qtype.getCode() == QType::A || p->qtype.getCode() == QType::AAAA || p->qtype.getCode() == QType::ANY)) {
if (!d_doExpandALIAS) {
- L<<Logger::Info<<"ALIAS record found for "<<target<<", but ALIAS expansion is disabled."<<endl;
+ g_log<<Logger::Info<<"ALIAS record found for "<<target<<", but ALIAS expansion is disabled."<<endl;
continue;
}
haveAlias=getRR<ALIASRecordContent>(rr.dr)->d_content;
}
- DLOG(L<<"After first ANY query for '"<<target<<"', id="<<sd.domain_id<<": weDone="<<weDone<<", weHaveUnauth="<<weHaveUnauth<<", weRedirected="<<weRedirected<<", haveAlias='"<<haveAlias<<"'"<<endl);
+ DLOG(g_log<<"After first ANY query for '"<<target<<"', id="<<sd.domain_id<<": weDone="<<weDone<<", weHaveUnauth="<<weHaveUnauth<<", weRedirected="<<weRedirected<<", haveAlias='"<<haveAlias<<"'"<<endl);
if(p->qtype.getCode() == QType::DS && weHaveUnauth && !weDone && !weRedirected && d_dk.isSecuredZone(sd.qname)) {
- DLOG(L<<"Q for DS of a name for which we do have NS, but for which we don't have on a zone with DNSSEC need to provide an AUTH answer that proves we don't"<<endl);
+ DLOG(g_log<<"Q for DS of a name for which we do have NS, but for which we don't have on a zone with DNSSEC need to provide an AUTH answer that proves we don't"<<endl);
makeNOError(p, r, target, DNSName(), sd, 1);
goto sendit;
}
if(!haveAlias.empty() && (!weDone || p->qtype.getCode() == QType::ANY)) {
- DLOG(L<<Logger::Warning<<"Found nothing that matched for '"<<target<<"', but did get alias to '"<<haveAlias<<"', referring"<<endl);
+ DLOG(g_log<<Logger::Warning<<"Found nothing that matched for '"<<target<<"', but did get alias to '"<<haveAlias<<"', referring"<<endl);
DP->completePacket(r, haveAlias, target);
return 0;
}
if(rrset.empty()) {
- DLOG(L<<"checking qtype.getCode() ["<<(p->qtype.getCode())<<"] against QType::DS ["<<(QType::DS)<<"]"<<endl);
+ DLOG(g_log<<"checking qtype.getCode() ["<<(p->qtype.getCode())<<"] against QType::DS ["<<(QType::DS)<<"]"<<endl);
if(p->qtype.getCode() == QType::DS)
{
- DLOG(L<<"DS query found no direct result, trying referral now"<<endl);
+ DLOG(g_log<<"DS query found no direct result, trying referral now"<<endl);
if(tryReferral(p, r, sd, target, retargetcount))
{
- DLOG(L<<"got referral for DS query"<<endl);
+ DLOG(g_log<<"got referral for DS query"<<endl);
goto sendit;
}
}
- DLOG(L<<Logger::Warning<<"Found nothing in the by-name ANY, but let's try wildcards.."<<endl);
+ DLOG(g_log<<Logger::Warning<<"Found nothing in the by-name ANY, but let's try wildcards.."<<endl);
bool wereRetargeted(false), nodata(false);
DNSName wildcard;
if(tryWildcard(p, r, sd, target, wildcard, wereRetargeted, nodata)) {
goto sendit;
}
else if(weHaveUnauth) {
- DLOG(L<<"Have unauth data, so need to hunt for best NS records"<<endl);
+ DLOG(g_log<<"Have unauth data, so need to hunt for best NS records"<<endl);
if(tryReferral(p, r, sd, target, retargetcount))
goto sendit;
// check whether this could be fixed easily
// if (*(rr.dr.d_name.rbegin()) == '.') {
- // L<<Logger::Error<<"Should not get here ("<<p->qdomain<<"|"<<p->qtype.getCode()<<"): you have a trailing dot, this could be the problem (or run pdnsutil rectify-zone " <<sd.qname<<")"<<endl;
+ // g_log<<Logger::Error<<"Should not get here ("<<p->qdomain<<"|"<<p->qtype.getCode()<<"): you have a trailing dot, this could be the problem (or run pdnsutil rectify-zone " <<sd.qname<<")"<<endl;
// } else {
- L<<Logger::Error<<"Should not get here ("<<p->qdomain<<"|"<<p->qtype.getCode()<<"): please run pdnsutil rectify-zone "<<sd.qname<<endl;
+ g_log<<Logger::Error<<"Should not get here ("<<p->qdomain<<"|"<<p->qtype.getCode()<<"): please run pdnsutil rectify-zone "<<sd.qname<<endl;
// }
}
else {
- DLOG(L<<"Have some data, but not the right data"<<endl);
+ DLOG(g_log<<"Have some data, but not the right data"<<endl);
makeNOError(p, r, target, DNSName(), sd, 0);
}
PC.insert(p, r, r->getMinTTL()); // in the packet cache
}
catch(DBException &e) {
- L<<Logger::Error<<"Backend reported condition which prevented lookup ("+e.reason+") sending out servfail"<<endl;
+ g_log<<Logger::Error<<"Backend reported condition which prevented lookup ("+e.reason+") sending out servfail"<<endl;
delete r;
r=p->replyPacket(); // generate an empty reply packet
r->setRcode(RCode::ServFail);
S.ringAccount("servfail-queries",p->qdomain.toLogString());
}
catch(PDNSException &e) {
- L<<Logger::Error<<"Backend reported permanent error which prevented lookup ("+e.reason+"), aborting"<<endl;
+ g_log<<Logger::Error<<"Backend reported permanent error which prevented lookup ("+e.reason+"), aborting"<<endl;
throw; // we WANT to die at this point
}
catch(std::exception &e) {
- L<<Logger::Error<<"Exception building answer packet for "<<p->qdomain<<"/"<<p->qtype.getName()<<" ("<<e.what()<<") sending out servfail"<<endl;
+ g_log<<Logger::Error<<"Exception building answer packet for "<<p->qdomain<<"/"<<p->qtype.getName()<<" ("<<e.what()<<") sending out servfail"<<endl;
delete r;
r=p->replyPacket(); // generate an empty reply packet
r->setRcode(RCode::ServFail);
ssize_t ret=recvfrom(fd, resp, sizeof(resp), 0, (sockaddr *)&fromaddr, &addrlen);
if (fromaddr != pident.remote) {
- L<<Logger::Notice<<"Response received from the wrong remote host ("<<fromaddr.toStringWithPort()<<" instead of "<<pident.remote.toStringWithPort()<<"), discarding"<<endl;
+ g_log<<Logger::Notice<<"Response received from the wrong remote host ("<<fromaddr.toStringWithPort()<<" instead of "<<pident.remote.toStringWithPort()<<"), discarding"<<endl;
}
socklen_t len=sizeof(psize);
if(!getsockopt(fd, SOL_SOCKET, optname, (char*)&psize, &len) && psize > size) {
- L<<Logger::Error<<"Not decreasing socket buffer size from "<<psize<<" to "<<size<<endl;
+ g_log<<Logger::Error<<"Not decreasing socket buffer size from "<<psize<<" to "<<size<<endl;
return;
}
if (setsockopt(fd, SOL_SOCKET, optname, (char*)&size, sizeof(size)) < 0 )
- L<<Logger::Error<<"Unable to raise socket buffer size to "<<size<<": "<<strerror(errno)<<endl;
+ g_log<<Logger::Error<<"Unable to raise socket buffer size to "<<size<<": "<<strerror(errno)<<endl;
}
closesocket(*fd);
}
catch(const PDNSException& e) {
- L<<Logger::Error<<"Error closing UDP socket after connect() failed: "<<e.reason<<endl;
+ g_log<<Logger::Error<<"Error closing UDP socket after connect() failed: "<<e.reason<<endl;
}
if(err==ENETUNREACH) // Seth "My Interfaces Are Like A Yo Yo" Arnold special
closesocket(*i);
}
catch(const PDNSException& e) {
- L<<Logger::Error<<"Error closing returned UDP socket: "<<e.reason<<endl;
+ g_log<<Logger::Error<<"Error closing returned UDP socket: "<<e.reason<<endl;
}
d_socks.erase(i++);
*d_len=packet.size();
memcpy(data,packet.c_str(),min(len,*d_len));
if(*nearMissLimit && pident.nearMisses > *nearMissLimit) {
- L<<Logger::Error<<"Too many ("<<pident.nearMisses<<" > "<<*nearMissLimit<<") bogus answers for '"<<domain<<"' from "<<fromaddr.toString()<<", assuming spoof attempt."<<endl;
+ g_log<<Logger::Error<<"Too many ("<<pident.nearMisses<<" > "<<*nearMissLimit<<") bogus answers for '"<<domain<<"' from "<<fromaddr.toString()<<", assuming spoof attempt."<<endl;
g_stats.spoofCount++;
return -1;
}
if(of)
of<< Utility::getpid() <<endl;
else
- L<<Logger::Error<<"Writing pid for "<<Utility::getpid()<<" to "<<s_pidfname<<" failed: "<<strerror(errno)<<endl;
+ g_log<<Logger::Error<<"Writing pid for "<<Utility::getpid()<<" to "<<s_pidfname<<" failed: "<<strerror(errno)<<endl;
}
TCPConnection::TCPConnection(int fd, const ComboAddress& addr) : d_remote(addr), d_fd(fd)
{
try {
if(closesocket(d_fd) < 0)
- L<<Logger::Error<<"Error closing socket for TCPConnection"<<endl;
+ g_log<<Logger::Error<<"Error closing socket for TCPConnection"<<endl;
}
catch(const PDNSException& e) {
- L<<Logger::Error<<"Error closing TCPConnection socket: "<<e.reason<<endl;
+ g_log<<Logger::Error<<"Error closing TCPConnection socket: "<<e.reason<<endl;
}
if(t_tcpClientCounts->count(d_remote) && !(*t_tcpClientCounts)[d_remote]--)
if(!g_quiet || tracedQuery) {
- L<<Logger::Warning<<t_id<<" ["<<MT->getTid()<<"/"<<MT->numProcesses()<<"] " << (dc->d_tcp ? "TCP " : "") << "question for '"<<dc->d_mdp.d_qname<<"|"
+ g_log<<Logger::Warning<<t_id<<" ["<<MT->getTid()<<"/"<<MT->numProcesses()<<"] " << (dc->d_tcp ? "TCP " : "") << "question for '"<<dc->d_mdp.d_qname<<"|"
<<DNSRecordContent::NumberToType(dc->d_mdp.d_qtype)<<"' from "<<dc->getRemote();
if(!dc->d_ednssubnet.source.empty()) {
- L<<" (ecs "<<dc->d_ednssubnet.source.toString()<<")";
+ g_log<<" (ecs "<<dc->d_ednssubnet.source.toString()<<")";
}
- L<<endl;
+ g_log<<endl;
}
sr.setId(MT->getTid());
}
catch(ImmediateServFailException &e) {
if(g_logCommonErrors)
- L<<Logger::Notice<<"Sending SERVFAIL to "<<dc->getRemote()<<" during resolve of '"<<dc->d_mdp.d_qname<<"' because: "<<e.reason<<endl;
+ g_log<<Logger::Notice<<"Sending SERVFAIL to "<<dc->getRemote()<<" during resolve of '"<<dc->d_mdp.d_qname<<"' because: "<<e.reason<<endl;
res = RCode::ServFail;
}
boost::split(lines, trace, boost::is_any_of("\n"));
for(const string& line : lines) {
if(!line.empty())
- L<<Logger::Warning<< line << endl;
+ g_log<<Logger::Warning<< line << endl;
}
}
}
if(!shouldNotValidate && sr.isDNSSECValidationRequested()) {
try {
if(sr.doLog()) {
- L<<Logger::Warning<<"Starting validation of answer to "<<dc->d_mdp.d_qname<<"|"<<QType(dc->d_mdp.d_qtype).getName()<<" for "<<dc->getRemote()<<endl;
+ g_log<<Logger::Warning<<"Starting validation of answer to "<<dc->d_mdp.d_qname<<"|"<<QType(dc->d_mdp.d_qtype).getName()<<" for "<<dc->getRemote()<<endl;
}
auto state = sr.getValidationState();
if(state == Secure) {
if(sr.doLog()) {
- L<<Logger::Warning<<"Answer to "<<dc->d_mdp.d_qname<<"|"<<QType(dc->d_mdp.d_qtype).getName()<<" for "<<dc->getRemote()<<" validates correctly"<<endl;
+ g_log<<Logger::Warning<<"Answer to "<<dc->d_mdp.d_qname<<"|"<<QType(dc->d_mdp.d_qtype).getName()<<" for "<<dc->getRemote()<<" validates correctly"<<endl;
}
// Is the query source interested in the value of the ad-bit?
}
else if(state == Insecure) {
if(sr.doLog()) {
- L<<Logger::Warning<<"Answer to "<<dc->d_mdp.d_qname<<"|"<<QType(dc->d_mdp.d_qtype).getName()<<" for "<<dc->getRemote()<<" validates as Insecure"<<endl;
+ g_log<<Logger::Warning<<"Answer to "<<dc->d_mdp.d_qname<<"|"<<QType(dc->d_mdp.d_qtype).getName()<<" for "<<dc->getRemote()<<" validates as Insecure"<<endl;
}
pw.getHeader()->ad=0;
}
else if(state == Bogus) {
if(g_dnssecLogBogus || sr.doLog() || g_dnssecmode == DNSSECMode::ValidateForLog) {
- L<<Logger::Warning<<"Answer to "<<dc->d_mdp.d_qname<<"|"<<QType(dc->d_mdp.d_qtype).getName()<<" for "<<dc->getRemote()<<" validates as Bogus"<<endl;
+ g_log<<Logger::Warning<<"Answer to "<<dc->d_mdp.d_qname<<"|"<<QType(dc->d_mdp.d_qtype).getName()<<" for "<<dc->getRemote()<<" validates as Bogus"<<endl;
}
// Do the query or the validation mode require sending out a SERVFAIL on validation errors?
if(!pw.getHeader()->cd && (g_dnssecmode == DNSSECMode::ValidateAll || dc->d_mdp.d_header.ad || DNSSECOK)) {
if(sr.doLog()) {
- L<<Logger::Warning<<"Sending out SERVFAIL for "<<dc->d_mdp.d_qname<<"|"<<QType(dc->d_mdp.d_qtype).getName()<<" because recursor or query demands it for Bogus results"<<endl;
+ g_log<<Logger::Warning<<"Sending out SERVFAIL for "<<dc->d_mdp.d_qname<<"|"<<QType(dc->d_mdp.d_qtype).getName()<<" because recursor or query demands it for Bogus results"<<endl;
}
pw.getHeader()->rcode=RCode::ServFail;
goto sendit;
} else {
if(sr.doLog()) {
- L<<Logger::Warning<<"Not sending out SERVFAIL for "<<dc->d_mdp.d_qname<<"|"<<QType(dc->d_mdp.d_qtype).getName()<<" Bogus validation since neither config nor query demands this"<<endl;
+ g_log<<Logger::Warning<<"Not sending out SERVFAIL for "<<dc->d_mdp.d_qname<<"|"<<QType(dc->d_mdp.d_qtype).getName()<<" Bogus validation since neither config nor query demands this"<<endl;
}
}
}
}
catch(ImmediateServFailException &e) {
if(g_logCommonErrors)
- L<<Logger::Notice<<"Sending SERVFAIL to "<<dc->getRemote()<<" during validation of '"<<dc->d_mdp.d_qname<<"|"<<QType(dc->d_mdp.d_qtype).getName()<<"' because: "<<e.reason<<endl;
+ g_log<<Logger::Notice<<"Sending SERVFAIL to "<<dc->getRemote()<<" during validation of '"<<dc->d_mdp.d_qname<<"|"<<QType(dc->d_mdp.d_qtype).getName()<<"' because: "<<e.reason<<endl;
pw.getHeader()->rcode=RCode::ServFail;
goto sendit;
}
addCMsgSrcAddr(&msgh, cbuf, &dc->d_local, 0);
}
if(sendmsg(dc->d_socket, &msgh, 0) < 0 && g_logCommonErrors)
- L<<Logger::Warning<<"Sending UDP reply to client "<<dc->getRemote()<<" failed with: "<<strerror(errno)<<endl;
+ g_log<<Logger::Warning<<"Sending UDP reply to client "<<dc->getRemote()<<" failed with: "<<strerror(errno)<<endl;
if(!SyncRes::s_nopacketcache && !variableAnswer && !sr.wasVariable() ) {
t_packetCache->insertResponsePacket(dc->d_tag, dc->d_qhash, dc->d_mdp.d_qname, dc->d_mdp.d_qtype, dc->d_mdp.d_qclass,
bool hadError=true;
if(wret == 0)
- L<<Logger::Error<<"EOF writing TCP answer to "<<dc->getRemote()<<endl;
+ g_log<<Logger::Error<<"EOF writing TCP answer to "<<dc->getRemote()<<endl;
else if(wret < 0 )
- L<<Logger::Error<<"Error writing TCP answer to "<<dc->getRemote()<<": "<< strerror(errno) <<endl;
+ g_log<<Logger::Error<<"Error writing TCP answer to "<<dc->getRemote()<<": "<< strerror(errno) <<endl;
else if((unsigned int)wret != 2 + packet.size())
- L<<Logger::Error<<"Oops, partial answer sent to "<<dc->getRemote()<<" for "<<dc->d_mdp.d_qname<<" (size="<< (2 + packet.size()) <<", sent "<<wret<<")"<<endl;
+ g_log<<Logger::Error<<"Oops, partial answer sent to "<<dc->getRemote()<<" for "<<dc->d_mdp.d_qname<<" (size="<< (2 + packet.size()) <<", sent "<<wret<<")"<<endl;
else
hadError=false;
}
float spent=makeFloat(sr.getNow()-dc->d_now);
if(!g_quiet) {
- L<<Logger::Error<<t_id<<" ["<<MT->getTid()<<"/"<<MT->numProcesses()<<"] answer to "<<(dc->d_mdp.d_header.rd?"":"non-rd ")<<"question '"<<dc->d_mdp.d_qname<<"|"<<DNSRecordContent::NumberToType(dc->d_mdp.d_qtype);
- L<<"': "<<ntohs(pw.getHeader()->ancount)<<" answers, "<<ntohs(pw.getHeader()->arcount)<<" additional, took "<<sr.d_outqueries<<" packets, "<<
+ g_log<<Logger::Error<<t_id<<" ["<<MT->getTid()<<"/"<<MT->numProcesses()<<"] answer to "<<(dc->d_mdp.d_header.rd?"":"non-rd ")<<"question '"<<dc->d_mdp.d_qname<<"|"<<DNSRecordContent::NumberToType(dc->d_mdp.d_qtype);
+ g_log<<"': "<<ntohs(pw.getHeader()->ancount)<<" answers, "<<ntohs(pw.getHeader()->arcount)<<" additional, took "<<sr.d_outqueries<<" packets, "<<
sr.d_totUsec/1000.0<<" netw ms, "<< spent*1000.0<<" tot ms, "<<
sr.d_throttledqueries<<" throttled, "<<sr.d_timeouts<<" timeouts, "<<sr.d_tcpoutqueries<<" tcp connections, rcode="<< res;
if(!shouldNotValidate && sr.isDNSSECValidationRequested()) {
- L<< ", dnssec="<<vStates[sr.getValidationState()];
+ g_log<< ", dnssec="<<vStates[sr.getValidationState()];
}
- L<<endl;
+ g_log<<endl;
}
dc=0;
}
catch(PDNSException &ae) {
- L<<Logger::Error<<"startDoResolve problem "<<makeLoginfo(dc)<<": "<<ae.reason<<endl;
+ g_log<<Logger::Error<<"startDoResolve problem "<<makeLoginfo(dc)<<": "<<ae.reason<<endl;
delete dc;
}
catch(MOADNSException& e) {
- L<<Logger::Error<<"DNS parser error "<<makeLoginfo(dc) <<": "<<dc->d_mdp.d_qname<<", "<<e.what()<<endl;
+ g_log<<Logger::Error<<"DNS parser error "<<makeLoginfo(dc) <<": "<<dc->d_mdp.d_qname<<", "<<e.what()<<endl;
delete dc;
}
catch(std::exception& e) {
- L<<Logger::Error<<"STL error "<< makeLoginfo(dc)<<": "<<e.what();
+ g_log<<Logger::Error<<"STL error "<< makeLoginfo(dc)<<": "<<e.what();
// Luawrapper nests the exception from Lua, so we unnest it here
try {
std::rethrow_if_nested(e);
} catch(const std::exception& ne) {
- L<<". Extra info: "<<ne.what();
+ g_log<<". Extra info: "<<ne.what();
} catch(...) {}
- L<<endl;
+ g_log<<endl;
delete dc;
}
catch(...) {
- L<<Logger::Error<<"Any other exception in a resolver context "<< makeLoginfo(dc) <<endl;
+ g_log<<Logger::Error<<"Any other exception in a resolver context "<< makeLoginfo(dc) <<endl;
}
g_stats.maxMThreadStackUsage = max(MT->getMaxStackUsage(), g_stats.maxMThreadStackUsage);
}
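
The catch blocks above rely on std::rethrow_if_nested to surface the error that Luawrapper wraps inside another exception ("we unnest it here"). As a hedged, self-contained illustration of that standard-library idiom, not code from this patch, a recursive printer could look like this:

#include <exception>
#include <iostream>
#include <string>

// Illustrative only: print an exception and, recursively, anything nested inside it.
static void printExceptionChain(const std::exception& e, int depth = 0)
{
  std::cerr << std::string(depth * 2, ' ') << e.what() << std::endl;
  try {
    std::rethrow_if_nested(e);   // re-throws the nested exception, if there is one
  }
  catch(const std::exception& nested) {
    printExceptionChain(nested, depth + 1);
  }
  catch(...) {
    std::cerr << std::string((depth + 1) * 2, ' ') << "unknown nested exception" << std::endl;
  }
}
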
if(!bytes || bytes < 0) {
if(g_logCommonErrors)
- L<<Logger::Error<<"TCP client "<< conn->d_remote.toStringWithPort() <<" disconnected after first byte"<<endl;
+ g_log<<Logger::Error<<"TCP client "<< conn->d_remote.toStringWithPort() <<" disconnected after first byte"<<endl;
t_fdm->removeReadFD(fd);
return;
}
else if(conn->state==TCPConnection::GETQUESTION) {
ssize_t bytes=recv(conn->getFD(), conn->data + conn->bytesread, conn->qlen - conn->bytesread, 0);
if(!bytes || bytes < 0 || bytes > std::numeric_limits<std::uint16_t>::max()) {
- L<<Logger::Error<<"TCP client "<< conn->d_remote.toStringWithPort() <<" disconnected while reading question body"<<endl;
+ g_log<<Logger::Error<<"TCP client "<< conn->d_remote.toStringWithPort() <<" disconnected while reading question body"<<endl;
t_fdm->removeReadFD(fd);
return;
}
catch(MOADNSException &mde) {
g_stats.clientParseError++;
if(g_logCommonErrors)
- L<<Logger::Error<<"Unable to parse packet from TCP client "<< conn->d_remote.toStringWithPort() <<endl;
+ g_log<<Logger::Error<<"Unable to parse packet from TCP client "<< conn->d_remote.toStringWithPort() <<endl;
return;
}
dc->d_tcpConnection = conn; // carry the torch
}
catch(const std::exception& e) {
if(g_logCommonErrors)
- L<<Logger::Warning<<"Error parsing a query packet qname='"<<qname<<"' for tag determination, setting tag=0: "<<e.what()<<endl;
+ g_log<<Logger::Warning<<"Error parsing a query packet qname='"<<qname<<"' for tag determination, setting tag=0: "<<e.what()<<endl;
}
}
}
catch(const std::exception& e)
{
if(g_logCommonErrors)
- L<<Logger::Warning<<"Error parsing a query packet for tag determination, setting tag=0: "<<e.what()<<endl;
+ g_log<<Logger::Warning<<"Error parsing a query packet for tag determination, setting tag=0: "<<e.what()<<endl;
}
}
#ifdef HAVE_PROTOBUF
}
catch(std::exception& e) {
if(g_logCommonErrors)
- L<<Logger::Warning<<"Error parsing a TCP query packet for edns subnet: "<<e.what()<<endl;
+ g_log<<Logger::Warning<<"Error parsing a TCP query packet for edns subnet: "<<e.what()<<endl;
}
}
#endif
if(dc->d_mdp.d_header.qr) {
g_stats.ignoredCount++;
- L<<Logger::Error<<"Ignoring answer from TCP client "<< dc->getRemote() <<" on server socket!"<<endl;
+ g_log<<Logger::Error<<"Ignoring answer from TCP client "<< dc->getRemote() <<" on server socket!"<<endl;
delete dc;
return;
}
if(dc->d_mdp.d_header.opcode) {
g_stats.ignoredCount++;
- L<<Logger::Error<<"Ignoring non-query opcode from TCP client "<< dc->getRemote() <<" on server socket!"<<endl;
+ g_log<<Logger::Error<<"Ignoring non-query opcode from TCP client "<< dc->getRemote() <<" on server socket!"<<endl;
delete dc;
return;
}
closesocket(newsock);
}
catch(const PDNSException& e) {
- L<<Logger::Error<<"Error closing TCP socket after an over capacity drop: "<<e.reason<<endl;
+ g_log<<Logger::Error<<"Error closing TCP socket after an over capacity drop: "<<e.reason<<endl;
}
return;
}
t_remotes->push_back(addr);
if(t_allowFrom && !t_allowFrom->match(&addr)) {
if(!g_quiet)
- L<<Logger::Error<<"["<<MT->getTid()<<"] dropping TCP query from "<<addr.toString()<<", address not matched by allow-from"<<endl;
+ g_log<<Logger::Error<<"["<<MT->getTid()<<"] dropping TCP query from "<<addr.toString()<<", address not matched by allow-from"<<endl;
g_stats.unauthorizedTCP++;
try {
closesocket(newsock);
}
catch(const PDNSException& e) {
- L<<Logger::Error<<"Error closing TCP socket after an ACL drop: "<<e.reason<<endl;
+ g_log<<Logger::Error<<"Error closing TCP socket after an ACL drop: "<<e.reason<<endl;
}
return;
}
closesocket(newsock); // don't call TCPConnection::closeAndCleanup here - did not enter it in the counts yet!
}
catch(const PDNSException& e) {
- L<<Logger::Error<<"Error closing TCP socket after an overflow drop: "<<e.reason<<endl;
+ g_log<<Logger::Error<<"Error closing TCP socket after an overflow drop: "<<e.reason<<endl;
}
return;
}
}
catch(const std::exception& e) {
if(g_logCommonErrors)
- L<<Logger::Warning<<"Error parsing a query packet qname='"<<qname<<"' for tag determination, setting tag=0: "<<e.what()<<endl;
+ g_log<<Logger::Warning<<"Error parsing a query packet qname='"<<qname<<"' for tag determination, setting tag=0: "<<e.what()<<endl;
}
}
}
catch(const std::exception& e)
{
if(g_logCommonErrors)
- L<<Logger::Warning<<"Error parsing a query packet for tag determination, setting tag=0: "<<e.what()<<endl;
+ g_log<<Logger::Warning<<"Error parsing a query packet for tag determination, setting tag=0: "<<e.what()<<endl;
}
}
}
#endif /* HAVE_PROTOBUF */
if(!g_quiet)
- L<<Logger::Notice<<t_id<< " question answered from packet cache tag="<<ctag<<" from "<<source.toStringWithPort()<<(source != fromaddr ? " (via "+fromaddr.toStringWithPort()+")" : "")<<endl;
+ g_log<<Logger::Notice<<t_id<< " question answered from packet cache tag="<<ctag<<" from "<<source.toStringWithPort()<<(source != fromaddr ? " (via "+fromaddr.toStringWithPort()+")" : "")<<endl;
g_stats.packetCacheHits++;
SyncRes::s_queries++;
addCMsgSrcAddr(&msgh, cbuf, &destaddr, 0);
}
if(sendmsg(fd, &msgh, 0) < 0 && g_logCommonErrors)
- L<<Logger::Warning<<"Sending UDP reply to client "<<source.toStringWithPort()<<(source != fromaddr ? " (via "+fromaddr.toStringWithPort()+")" : "")<<" failed with: "<<strerror(errno)<<endl;
+ g_log<<Logger::Warning<<"Sending UDP reply to client "<<source.toStringWithPort()<<(source != fromaddr ? " (via "+fromaddr.toStringWithPort()+")" : "")<<" failed with: "<<strerror(errno)<<endl;
if(response.length() >= sizeof(struct dnsheader)) {
struct dnsheader tmpdh;
}
}
catch(std::exception& e) {
- L<<Logger::Error<<"Error processing or aging answer packet: "<<e.what()<<endl;
+ g_log<<Logger::Error<<"Error processing or aging answer packet: "<<e.what()<<endl;
return 0;
}
if(t_pdl) {
if(t_pdl->ipfilter(source, destination, *dh)) {
if(!g_quiet)
- L<<Logger::Notice<<t_id<<" ["<<MT->getTid()<<"/"<<MT->numProcesses()<<"] DROPPED question from "<<source.toStringWithPort()<<(source != fromaddr ? " (via "+fromaddr.toStringWithPort()+")" : "")<<" based on policy"<<endl;
+ g_log<<Logger::Notice<<t_id<<" ["<<MT->getTid()<<"/"<<MT->numProcesses()<<"] DROPPED question from "<<source.toStringWithPort()<<(source != fromaddr ? " (via "+fromaddr.toStringWithPort()+")" : "")<<" based on policy"<<endl;
g_stats.policyDrops++;
return 0;
}
if(MT->numProcesses() > g_maxMThreads) {
if(!g_quiet)
- L<<Logger::Notice<<t_id<<" ["<<MT->getTid()<<"/"<<MT->numProcesses()<<"] DROPPED question from "<<source.toStringWithPort()<<(source != fromaddr ? " (via "+fromaddr.toStringWithPort()+")" : "")<<", over capacity"<<endl;
+ g_log<<Logger::Notice<<t_id<<" ["<<MT->getTid()<<"/"<<MT->numProcesses()<<"] DROPPED question from "<<source.toStringWithPort()<<(source != fromaddr ? " (via "+fromaddr.toStringWithPort()+")" : "")<<", over capacity"<<endl;
g_stats.overCapacityDrops++;
return 0;
if(t_allowFrom && !t_allowFrom->match(&fromaddr)) {
if(!g_quiet)
- L<<Logger::Error<<"["<<MT->getTid()<<"] dropping UDP query from "<<fromaddr.toString()<<", address not matched by allow-from"<<endl;
+ g_log<<Logger::Error<<"["<<MT->getTid()<<"] dropping UDP query from "<<fromaddr.toString()<<", address not matched by allow-from"<<endl;
g_stats.unauthorizedUDP++;
return;
BOOST_STATIC_ASSERT(offsetof(sockaddr_in, sin_port) == offsetof(sockaddr_in6, sin6_port));
if(!fromaddr.sin4.sin_port) { // also works for IPv6
if(!g_quiet)
- L<<Logger::Error<<"["<<MT->getTid()<<"] dropping UDP query from "<<fromaddr.toStringWithPort()<<", can't deal with port 0"<<endl;
+ g_log<<Logger::Error<<"["<<MT->getTid()<<"] dropping UDP query from "<<fromaddr.toStringWithPort()<<", can't deal with port 0"<<endl;
g_stats.clientParseError++; // not quite the best place to put it, but needs to go somewhere
return;
if(dh->qr) {
g_stats.ignoredCount++;
if(g_logCommonErrors)
- L<<Logger::Error<<"Ignoring answer from "<<fromaddr.toString()<<" on server socket!"<<endl;
+ g_log<<Logger::Error<<"Ignoring answer from "<<fromaddr.toString()<<" on server socket!"<<endl;
}
else if(dh->opcode) {
g_stats.ignoredCount++;
if(g_logCommonErrors)
- L<<Logger::Error<<"Ignoring non-query opcode "<<dh->opcode<<" from "<<fromaddr.toString()<<" on server socket!"<<endl;
+ g_log<<Logger::Error<<"Ignoring non-query opcode "<<dh->opcode<<" from "<<fromaddr.toString()<<" on server socket!"<<endl;
}
else {
string question(data, (size_t)len);
catch(MOADNSException& mde) {
g_stats.clientParseError++;
if(g_logCommonErrors)
- L<<Logger::Error<<"Unable to parse packet from remote UDP client "<<fromaddr.toString() <<": "<<mde.what()<<endl;
+ g_log<<Logger::Error<<"Unable to parse packet from remote UDP client "<<fromaddr.toString() <<": "<<mde.what()<<endl;
}
catch(std::runtime_error& e) {
g_stats.clientParseError++;
if(g_logCommonErrors)
- L<<Logger::Error<<"Unable to parse packet from remote UDP client "<<fromaddr.toString() <<": "<<e.what()<<endl;
+ g_log<<Logger::Error<<"Unable to parse packet from remote UDP client "<<fromaddr.toString() <<": "<<e.what()<<endl;
}
}
else {
int tmp=1;
if(setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &tmp, sizeof tmp)<0) {
- L<<Logger::Error<<"Setsockopt failed for TCP listening socket"<<endl;
+ g_log<<Logger::Error<<"Setsockopt failed for TCP listening socket"<<endl;
exit(1);
}
if(sin.sin6.sin6_family == AF_INET6 && setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &tmp, sizeof(tmp)) < 0) {
- L<<Logger::Error<<"Failed to set IPv6 socket to IPv6 only, continuing anyhow: "<<strerror(errno)<<endl;
+ g_log<<Logger::Error<<"Failed to set IPv6 socket to IPv6 only, continuing anyhow: "<<strerror(errno)<<endl;
}
#ifdef TCP_DEFER_ACCEPT
if(setsockopt(fd, IPPROTO_TCP, TCP_DEFER_ACCEPT, &tmp, sizeof tmp) >= 0) {
if(i==locals.begin())
- L<<Logger::Error<<"Enabled TCP data-ready filter for (slight) DoS protection"<<endl;
+ g_log<<Logger::Error<<"Enabled TCP data-ready filter for (slight) DoS protection"<<endl;
}
#endif
#ifdef TCP_FASTOPEN
int fastOpenQueueSize = ::arg().asNum("tcp-fast-open");
if (setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN, &fastOpenQueueSize, sizeof fastOpenQueueSize) < 0) {
- L<<Logger::Error<<"Failed to enable TCP Fast Open for listening socket: "<<strerror(errno)<<endl;
+ g_log<<Logger::Error<<"Failed to enable TCP Fast Open for listening socket: "<<strerror(errno)<<endl;
}
#else
- L<<Logger::Warning<<"TCP Fast Open configured but not supported for listening socket"<<endl;
+ g_log<<Logger::Warning<<"TCP Fast Open configured but not supported for listening socket"<<endl;
#endif
}
// we don't need to update g_listenSocketsAddresses here because it doesn't apply to TCP:
// - the fd a connection uses is the one returned by accept(), not this listening fd
if(sin.sin4.sin_family == AF_INET)
- L<<Logger::Error<<"Listening for TCP queries on "<< sin.toString() <<":"<<st.port<<endl;
+ g_log<<Logger::Error<<"Listening for TCP queries on "<< sin.toString() <<":"<<st.port<<endl;
else
- L<<Logger::Error<<"Listening for TCP queries on ["<< sin.toString() <<"]:"<<st.port<<endl;
+ g_log<<Logger::Error<<"Listening for TCP queries on ["<< sin.toString() <<"]:"<<st.port<<endl;
}
}
throw PDNSException("Making a UDP server socket for resolver: "+netstringerror());
}
if (!setSocketTimestamps(fd))
- L<<Logger::Warning<<"Unable to enable timestamp reporting for socket"<<endl;
+ g_log<<Logger::Warning<<"Unable to enable timestamp reporting for socket"<<endl;
if(IsAnyAddress(sin)) {
if(sin.sin4.sin_family == AF_INET)
g_fromtosockets.insert(fd);
#endif
if(sin.sin6.sin6_family == AF_INET6 && setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &one, sizeof(one)) < 0) {
- L<<Logger::Error<<"Failed to set IPv6 socket to IPv6 only, continuing anyhow: "<<strerror(errno)<<endl;
+ g_log<<Logger::Error<<"Failed to set IPv6 socket to IPv6 only, continuing anyhow: "<<strerror(errno)<<endl;
}
}
if( ::arg().mustDo("non-local-bind") )
deferredAdds[threadId].push_back(make_pair(fd, handleNewUDPQuestion));
g_listenSocketsAddresses[fd]=sin; // this is written to only from the startup thread, not from the workers
if(sin.sin4.sin_family == AF_INET)
- L<<Logger::Error<<"Listening for UDP queries on "<< sin.toString() <<":"<<st.port<<endl;
+ g_log<<Logger::Error<<"Listening for UDP queries on "<< sin.toString() <<":"<<st.port<<endl;
else
- L<<Logger::Error<<"Listening for UDP queries on ["<< sin.toString() <<"]:"<<st.port<<endl;
+ g_log<<Logger::Error<<"Listening for UDP queries on ["<< sin.toString() <<"]:"<<st.port<<endl;
}
}
int i=open("/dev/null",O_RDWR); /* open stdin */
if(i < 0)
- L<<Logger::Critical<<"Unable to open /dev/null: "<<stringerror()<<endl;
+ g_log<<Logger::Critical<<"Unable to open /dev/null: "<<stringerror()<<endl;
else {
dup2(i,0); /* stdin */
dup2(i,1); /* stdout */
uint64_t cacheMisses = broadcastAccFunction<uint64_t>(pleaseGetCacheMisses);
if(g_stats.qcounter && (cacheHits + cacheMisses) && SyncRes::s_queries && SyncRes::s_outqueries) {
- L<<Logger::Notice<<"stats: "<<g_stats.qcounter<<" questions, "<<
+ g_log<<Logger::Notice<<"stats: "<<g_stats.qcounter<<" questions, "<<
broadcastAccFunction<uint64_t>(pleaseGetCacheSize)<< " cache entries, "<<
broadcastAccFunction<uint64_t>(pleaseGetNegCacheSize)<<" negative entries, "<<
(int)((cacheHits*100.0)/(cacheHits+cacheMisses))<<"% cache hits"<<endl;
- L<<Logger::Notice<<"stats: throttle map: "
+ g_log<<Logger::Notice<<"stats: throttle map: "
<< broadcastAccFunction<uint64_t>(pleaseGetThrottleSize) <<", ns speeds: "
<< broadcastAccFunction<uint64_t>(pleaseGetNsSpeedsSize)<<endl;
- L<<Logger::Notice<<"stats: outpacket/query ratio "<<(int)(SyncRes::s_outqueries*100.0/SyncRes::s_queries)<<"%";
- L<<Logger::Notice<<", "<<(int)(SyncRes::s_throttledqueries*100.0/(SyncRes::s_outqueries+SyncRes::s_throttledqueries))<<"% throttled, "
+ g_log<<Logger::Notice<<"stats: outpacket/query ratio "<<(int)(SyncRes::s_outqueries*100.0/SyncRes::s_queries)<<"%";
+ g_log<<Logger::Notice<<", "<<(int)(SyncRes::s_throttledqueries*100.0/(SyncRes::s_outqueries+SyncRes::s_throttledqueries))<<"% throttled, "
<<SyncRes::s_nodelegated<<" no-delegation drops"<<endl;
- L<<Logger::Notice<<"stats: "<<SyncRes::s_tcpoutqueries<<" outgoing tcp connections, "<<
+ g_log<<Logger::Notice<<"stats: "<<SyncRes::s_tcpoutqueries<<" outgoing tcp connections, "<<
broadcastAccFunction<uint64_t>(pleaseGetConcurrentQueries)<<" queries running, "<<SyncRes::s_outgoingtimeouts<<" outgoing timeouts"<<endl;
- //L<<Logger::Notice<<"stats: "<<g_stats.ednsPingMatches<<" ping matches, "<<g_stats.ednsPingMismatches<<" mismatches, "<<
+ //g_log<<Logger::Notice<<"stats: "<<g_stats.ednsPingMatches<<" ping matches, "<<g_stats.ednsPingMismatches<<" mismatches, "<<
//g_stats.noPingOutQueries<<" outqueries w/o ping, "<< g_stats.noEdnsOutQueries<<" w/o EDNS"<<endl;
- L<<Logger::Notice<<"stats: " << broadcastAccFunction<uint64_t>(pleaseGetPacketCacheSize) <<
+ g_log<<Logger::Notice<<"stats: " << broadcastAccFunction<uint64_t>(pleaseGetPacketCacheSize) <<
" packet cache entries, "<<(int)(100.0*broadcastAccFunction<uint64_t>(pleaseGetPacketCacheHits)/SyncRes::s_queries) << "% packet cache hits"<<endl;
time_t now = time(0);
if(lastOutputTime && lastQueryCount && now != lastOutputTime) {
- L<<Logger::Notice<<"stats: "<< (SyncRes::s_queries - lastQueryCount) / (now - lastOutputTime) <<" qps (average over "<< (now - lastOutputTime) << " seconds)"<<endl;
+ g_log<<Logger::Notice<<"stats: "<< (SyncRes::s_queries - lastQueryCount) / (now - lastOutputTime) <<" qps (average over "<< (now - lastOutputTime) << " seconds)"<<endl;
}
lastOutputTime = now;
lastQueryCount = SyncRes::s_queries;
}
else if(statsWanted)
- L<<Logger::Notice<<"stats: no stats yet!"<<endl;
+ g_log<<Logger::Notice<<"stats: no stats yet!"<<endl;
statsWanted=false;
}
}
catch(std::exception& e)
{
- L<<Logger::Error<<"Exception while performing security poll: "<<e.what()<<endl;
+ g_log<<Logger::Error<<"Exception while performing security poll: "<<e.what()<<endl;
}
catch(PDNSException& e)
{
- L<<Logger::Error<<"Exception while performing security poll: "<<e.reason<<endl;
+ g_log<<Logger::Error<<"Exception while performing security poll: "<<e.reason<<endl;
}
catch(ImmediateServFailException &e)
{
- L<<Logger::Error<<"Exception while performing security poll: "<<e.reason<<endl;
+ g_log<<Logger::Error<<"Exception while performing security poll: "<<e.reason<<endl;
}
catch(...)
{
- L<<Logger::Error<<"Exception while performing security poll"<<endl;
+ g_log<<Logger::Error<<"Exception while performing security poll"<<endl;
}
}
catch(PDNSException& ae)
{
s_running=false;
- L<<Logger::Error<<"Fatal error in housekeeping thread: "<<ae.reason<<endl;
+ g_log<<Logger::Error<<"Fatal error in housekeeping thread: "<<ae.reason<<endl;
throw;
}
}
}
catch(std::exception& e) {
if(g_logCommonErrors)
- L<<Logger::Error<<"PIPE function we executed created exception: "<<e.what()<<endl; // but what if they wanted an answer.. we send 0
+ g_log<<Logger::Error<<"PIPE function we executed created exception: "<<e.what()<<endl; // but what if they wanted an answer.. we send 0
}
catch(PDNSException& e) {
if(g_logCommonErrors)
- L<<Logger::Error<<"PIPE function we executed created PDNS exception: "<<e.reason<<endl; // but what if they wanted an answer.. we send 0
+ g_log<<Logger::Error<<"PIPE function we executed created PDNS exception: "<<e.reason<<endl; // but what if they wanted an answer.. we send 0
}
if(tmsg->wantAnswer) {
if(write(g_pipes[t_id].writeFromThread, &resp, sizeof(resp)) != sizeof(resp)) {
command();
}
catch(std::exception& e) {
- L<<Logger::Error<<"Error dealing with control socket request: "<<e.what()<<endl;
+ g_log<<Logger::Error<<"Error dealing with control socket request: "<<e.what()<<endl;
}
catch(PDNSException& ae) {
- L<<Logger::Error<<"Error dealing with control socket request: "<<ae.reason<<endl;
+ g_log<<Logger::Error<<"Error dealing with control socket request: "<<ae.reason<<endl;
}
}
else {
g_stats.serverParseError++;
if(g_logCommonErrors)
- L<<Logger::Error<<"Unable to parse packet from remote UDP server "<< fromaddr.toString() <<
+ g_log<<Logger::Error<<"Unable to parse packet from remote UDP server "<< fromaddr.toString() <<
": packet smaller than DNS header"<<endl;
}
pident.fd=fd;
if(!dh.qr && g_logCommonErrors) {
- L<<Logger::Notice<<"Not taking data from question on outgoing socket from "<< fromaddr.toStringWithPort() <<endl;
+ g_log<<Logger::Notice<<"Not taking data from question on outgoing socket from "<< fromaddr.toStringWithPort() <<endl;
}
if(!dh.qdcount || // UPC, Nominum, very old BIND on FormErr, NSD
}
catch(std::exception& e) {
g_stats.serverParseError++; // won't be fed to lwres.cc, so we have to increment
- L<<Logger::Warning<<"Error in packet from remote nameserver "<< fromaddr.toStringWithPort() << ": "<<e.what() << endl;
+ g_log<<Logger::Warning<<"Error in packet from remote nameserver "<< fromaddr.toStringWithPort() << ": "<<e.what() << endl;
return;
}
}
}
g_stats.unexpectedCount++; // if we made it here, it really is an unexpected answer
if(g_logCommonErrors) {
- L<<Logger::Warning<<"Discarding unexpected packet from "<<fromaddr.toStringWithPort()<<": "<< (pident.domain.empty() ? "<empty>" : pident.domain.toString())<<", "<<pident.type<<", "<<MT->d_waiters.size()<<" waiters"<<endl;
+ g_log<<Logger::Warning<<"Discarding unexpected packet from "<<fromaddr.toStringWithPort()<<": "<< (pident.domain.empty() ? "<empty>" : pident.domain.toString())<<", "<<pident.type<<", "<<MT->d_waiters.size()<<" waiters"<<endl;
}
}
else if(fd >= 0) {
return ret;
}
catch(FDMultiplexerException &fe) {
- L<<Logger::Error<<"Non-fatal error initializing possible multiplexer ("<<fe.what()<<"), falling back"<<endl;
+ g_log<<Logger::Error<<"Non-fatal error initializing possible multiplexer ("<<fe.what()<<"), falling back"<<endl;
}
catch(...) {
- L<<Logger::Error<<"Non-fatal error initializing possible multiplexer"<<endl;
+ g_log<<Logger::Error<<"Non-fatal error initializing possible multiplexer"<<endl;
}
}
- L<<Logger::Error<<"No working multiplexer found!"<<endl;
+ g_log<<Logger::Error<<"No working multiplexer found!"<<endl;
exit(1);
}
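
getMultiplexer() above tries each available event multiplexer in turn, logging a non-fatal error and falling back until one initializes, and exits if none work. A minimal sketch of that try-in-order pattern, using made-up names rather than the recursor's actual FDMultiplexer API:

#include <functional>
#include <iostream>
#include <memory>
#include <stdexcept>
#include <vector>

struct EventLoop { virtual ~EventLoop() = default; };   // stand-in for a multiplexer implementation

// Try each candidate factory in order of preference; return the first one that initializes.
static std::unique_ptr<EventLoop> pickEventLoop(const std::vector<std::function<std::unique_ptr<EventLoop>()>>& factories)
{
  for(const auto& make : factories) {
    try {
      return make();                                     // e.g. an epoll-, kqueue- or poll-based loop
    }
    catch(const std::exception& e) {
      std::cerr << "Non-fatal error initializing possible multiplexer (" << e.what() << "), falling back" << std::endl;
    }
  }
  throw std::runtime_error("No working multiplexer found!");
}
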
try {
if(fname.empty()) {
t_pdl.reset();
- L<<Logger::Error<<t_id<<" Unloaded current lua script"<<endl;
+ g_log<<Logger::Error<<t_id<<" Unloaded current lua script"<<endl;
return new string("unloaded\n");
}
else {
}
}
catch(std::exception& e) {
- L<<Logger::Error<<t_id<<" Retaining current script, error from '"<<fname<<"': "<< e.what() <<endl;
+ g_log<<Logger::Error<<t_id<<" Retaining current script, error from '"<<fname<<"': "<< e.what() <<endl;
return new string("retaining current script, error from '"+fname+"': "+e.what()+"\n");
}
- L<<Logger::Warning<<t_id<<" (Re)loaded lua script from '"<<fname<<"'"<<endl;
+ g_log<<Logger::Warning<<t_id<<" (Re)loaded lua script from '"<<fname<<"'"<<endl;
return new string("(re)loaded '"+fname+"'\n");
}
if(readFileIfThere("/proc/sys/net/ipv6/route/max_size", &line)) {
int lim=std::stoi(line);
if(lim < 16384) {
- L<<Logger::Error<<"If using IPv6, please raise sysctl net.ipv6.route.max_size, currently set to "<<lim<<" which is < 16384"<<endl;
+ g_log<<Logger::Error<<"If using IPv6, please raise sysctl net.ipv6.route.max_size, currently set to "<<lim<<" which is < 16384"<<endl;
}
}
#endif
unsigned int hardlimit= getFilenumLimit(true);
if(hardlimit >= wantFDs) {
setFilenumLimit(wantFDs);
- L<<Logger::Warning<<"Raised soft limit on number of filedescriptors to "<<wantFDs<<" to match max-mthreads and threads settings"<<endl;
+ g_log<<Logger::Warning<<"Raised soft limit on number of filedescriptors to "<<wantFDs<<" to match max-mthreads and threads settings"<<endl;
}
else {
int newval = (hardlimit - 25) / g_numWorkerThreads;
- L<<Logger::Warning<<"Insufficient number of filedescriptors available for max-mthreads*threads setting! ("<<hardlimit<<" < "<<wantFDs<<"), reducing max-mthreads to "<<newval<<endl;
+ g_log<<Logger::Warning<<"Insufficient number of filedescriptors available for max-mthreads*threads setting! ("<<hardlimit<<" < "<<wantFDs<<"), reducing max-mthreads to "<<newval<<endl;
g_maxMThreads = newval;
setFilenumLimit(hardlimit);
}
allowFrom->addMask(line);
}
- L<<Logger::Warning<<"Done parsing " << allowFrom->size() <<" allow-from ranges from file '"<<::arg()["allow-from-file"]<<"' - overriding 'allow-from' setting"<<endl;
+ g_log<<Logger::Warning<<"Done parsing " << allowFrom->size() <<" allow-from ranges from file '"<<::arg()["allow-from-file"]<<"' - overriding 'allow-from' setting"<<endl;
}
else if(!::arg()["allow-from"].empty()) {
vector<string> ips;
stringtok(ips, ::arg()["allow-from"], ", ");
- L<<Logger::Warning<<"Only allowing queries from: ";
+ g_log<<Logger::Warning<<"Only allowing queries from: ";
for(vector<string>::const_iterator i = ips.begin(); i!= ips.end(); ++i) {
allowFrom->addMask(*i);
if(i!=ips.begin())
- L<<Logger::Warning<<", ";
- L<<Logger::Warning<<*i;
+ g_log<<Logger::Warning<<", ";
+ g_log<<Logger::Warning<<*i;
}
- L<<Logger::Warning<<endl;
+ g_log<<Logger::Warning<<endl;
}
else {
if(::arg()["local-address"]!="127.0.0.1" && ::arg().asNum("local-port")==53)
- L<<Logger::Error<<"WARNING: Allowing queries from all IP addresses - this can be a security risk!"<<endl;
+ g_log<<Logger::Error<<"WARNING: Allowing queries from all IP addresses - this can be a security risk!"<<endl;
allowFrom = nullptr;
}
const std::string value = ::arg()["cpu-map"];
if (!value.empty() && !isSettingThreadCPUAffinitySupported()) {
- L<<Logger::Warning<<"CPU mapping requested but not supported, skipping"<<endl;
+ g_log<<Logger::Warning<<"CPU mapping requested but not supported, skipping"<<endl;
return result;
}
}
}
catch(const std::exception& e) {
- L<<Logger::Error<<"Error parsing cpu-map entry '"<<part<<"': "<<e.what()<<endl;
+ g_log<<Logger::Error<<"Error parsing cpu-map entry '"<<part<<"': "<<e.what()<<endl;
}
}
if (cpuMapping != cpusMap.cend()) {
int rc = mapThreadToCPUList(tid, cpuMapping->second);
if (rc == 0) {
- L<<Logger::Info<<"CPU affinity for worker "<<n<<" has been set to CPU map:";
+ g_log<<Logger::Info<<"CPU affinity for worker "<<n<<" has been set to CPU map:";
for (const auto cpu : cpuMapping->second) {
- L<<Logger::Info<<" "<<cpu;
+ g_log<<Logger::Info<<" "<<cpu;
}
- L<<Logger::Info<<endl;
+ g_log<<Logger::Info<<endl;
}
else {
- L<<Logger::Warning<<"Error setting CPU affinity for worker "<<n<<" to CPU map:";
+ g_log<<Logger::Warning<<"Error setting CPU affinity for worker "<<n<<" to CPU map:";
for (const auto cpu : cpuMapping->second) {
- L<<Logger::Info<<" "<<cpu;
+ g_log<<Logger::Info<<" "<<cpu;
}
- L<<Logger::Info<<strerror(rc)<<endl;
+ g_log<<Logger::Info<<strerror(rc)<<endl;
}
}
}
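
The cpu-map handling above pins each worker thread to the CPUs listed for it via mapThreadToCPUList and logs strerror(rc) on failure. As a hedged sketch of what such a helper typically does on Linux (pthread_setaffinity_np), not the recursor's actual implementation:

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <set>

// Illustrative only: restrict thread 'tid' to the CPUs in 'cpus'.
// Returns 0 on success or an errno-style value, suitable for strerror().
static int pinThreadToCpus(pthread_t tid, const std::set<int>& cpus)
{
  cpu_set_t cpuset;
  CPU_ZERO(&cpuset);
  for(int cpu : cpus) {
    CPU_SET(cpu, &cpuset);
  }
  return pthread_setaffinity_np(tid, sizeof(cpuset), &cpuset);
}
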
static int serviceMain(int argc, char*argv[])
{
- L.setName(s_programname);
- L.disableSyslog(::arg().mustDo("disable-syslog"));
- L.setTimestamps(::arg().mustDo("log-timestamp"));
+ g_log.setName(s_programname);
+ g_log.disableSyslog(::arg().mustDo("disable-syslog"));
+ g_log.setTimestamps(::arg().mustDo("log-timestamp"));
if(!::arg()["logging-facility"].empty()) {
int val=logFacilityToLOG(::arg().asNum("logging-facility") );
if(val >= 0)
- theL().setFacility(val);
+ g_log.setFacility(val);
else
- L<<Logger::Error<<"Unknown logging facility "<<::arg().asNum("logging-facility") <<endl;
+ g_log<<Logger::Error<<"Unknown logging facility "<<::arg().asNum("logging-facility") <<endl;
}
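
serviceMain wires up the process-wide logger: its name, syslog behaviour, timestamps and facility. For readers unfamiliar with the g_log << Logger::Warning << ... << endl calls that appear throughout this patch, here is a hedged, self-contained sketch of that stream-with-urgency pattern; it is deliberately minimal and is not the recursor's actual Logger class:

#include <iostream>
#include <sstream>
#include <string>

// Illustrative only: a tiny stream-style logger showing the usage pattern of g_log.
// The real Logger also handles syslog, facilities and per-thread state.
class MiniLogger
{
public:
  enum Urgency { Error = 3, Warning = 4, Notice = 5, Info = 6 };

  MiniLogger& operator<<(Urgency u) { d_urgency = u; return *this; }              // select the level

  template<class T>
  MiniLogger& operator<<(const T& value) { d_line << value; return *this; }       // buffer the message

  MiniLogger& operator<<(std::ostream& (*)(std::ostream&))                        // std::endl flushes the line
  {
    std::cerr << "level " << d_urgency << ": " << d_line.str() << std::endl;
    d_line.str("");
    d_urgency = Notice;
    return *this;
  }

private:
  std::ostringstream d_line;
  Urgency d_urgency{Notice};
};

MiniLogger g_minilog;   // hypothetical global, analogous to g_log
// usage: g_minilog << MiniLogger::Warning << "listening on port " << 53 << std::endl;
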
showProductVersion();
vector<string> addrs;
if(!::arg()["query-local-address6"].empty()) {
SyncRes::s_doIPv6=true;
- L<<Logger::Warning<<"Enabling IPv6 transport for outgoing queries"<<endl;
+ g_log<<Logger::Warning<<"Enabling IPv6 transport for outgoing queries"<<endl;
stringtok(addrs, ::arg()["query-local-address6"], ", ;");
for(const string& addr : addrs) {
}
}
else {
- L<<Logger::Warning<<"NOT using IPv6 for outgoing queries - set 'query-local-address6=::' to enable"<<endl;
+ g_log<<Logger::Warning<<"NOT using IPv6 for outgoing queries - set 'query-local-address6=::' to enable"<<endl;
}
addrs.clear();
stringtok(addrs, ::arg()["query-local-address"], ", ;");
}
}
catch(std::exception& e) {
- L<<Logger::Error<<"Assigning local query addresses: "<<e.what();
+ g_log<<Logger::Error<<"Assigning local query addresses: "<<e.what();
exit(99);
}
else if(::arg()["dnssec"]=="log-fail")
g_dnssecmode=DNSSECMode::ValidateForLog;
else {
- L<<Logger::Error<<"Unknown DNSSEC mode "<<::arg()["dnssec"]<<endl;
+ g_log<<Logger::Error<<"Unknown DNSSEC mode "<<::arg()["dnssec"]<<endl;
exit(1);
}
loadRecursorLuaConfig(::arg()["lua-config-file"], ::arg().mustDo("daemon"));
}
catch (PDNSException &e) {
- L<<Logger::Error<<"Cannot load Lua configuration: "<<e.reason<<endl;
+ g_log<<Logger::Error<<"Cannot load Lua configuration: "<<e.reason<<endl;
exit(1);
}
ips.push_back("0.0.0.0");
ips.push_back("::");
- L<<Logger::Warning<<"Will not send queries to: ";
+ g_log<<Logger::Warning<<"Will not send queries to: ";
for(vector<string>::const_iterator i = ips.begin(); i!= ips.end(); ++i) {
SyncRes::addDontQuery(*i);
if(i!=ips.begin())
- L<<Logger::Warning<<", ";
- L<<Logger::Warning<<*i;
+ g_log<<Logger::Warning<<", ";
+ g_log<<Logger::Warning<<*i;
}
- L<<Logger::Warning<<endl;
+ g_log<<Logger::Warning<<endl;
}
g_quiet=::arg().mustDo("quiet");
g_weDistributeQueries = ::arg().mustDo("pdns-distributes-queries");
if(g_weDistributeQueries) {
- L<<Logger::Warning<<"PowerDNS Recursor itself will distribute queries over threads"<<endl;
+ g_log<<Logger::Warning<<"PowerDNS Recursor itself will distribute queries over threads"<<endl;
}
setupDelegationOnly();
g_numWorkerThreads = ::arg().asNum("threads");
if (g_numWorkerThreads < 1) {
- L<<Logger::Warning<<"Asked to run with 0 threads, raising to 1 instead"<<endl;
+ g_log<<Logger::Warning<<"Asked to run with 0 threads, raising to 1 instead"<<endl;
g_numWorkerThreads = 1;
}
}
if(::arg().mustDo("daemon")) {
- L<<Logger::Warning<<"Calling daemonize, going to background"<<endl;
- L.toConsole(Logger::Critical);
+ g_log<<Logger::Warning<<"Calling daemonize, going to background"<<endl;
+ g_log.toConsole(Logger::Critical);
daemonize();
loadRecursorLuaConfig(::arg()["lua-config-file"], false);
}
#ifdef HAVE_LIBSODIUM
if (sodium_init() == -1) {
- L<<Logger::Error<<"Unable to initialize sodium crypto library"<<endl;
+ g_log<<Logger::Error<<"Unable to initialize sodium crypto library"<<endl;
exit(99);
}
#endif
char *ns;
ns = getenv("NOTIFY_SOCKET");
if (ns != nullptr) {
- L<<Logger::Error<<"Unable to chroot when running from systemd. Please disable chroot= or set the 'Type' for this service to 'simple'"<<endl;
+ g_log<<Logger::Error<<"Unable to chroot when running from systemd. Please disable chroot= or set the 'Type' for this service to 'simple'"<<endl;
exit(1);
}
#endif
if (chroot(::arg()["chroot"].c_str())<0 || chdir("/") < 0) {
- L<<Logger::Error<<"Unable to chroot to '"+::arg()["chroot"]+"': "<<strerror (errno)<<", exiting"<<endl;
+ g_log<<Logger::Error<<"Unable to chroot to '"+::arg()["chroot"]+"': "<<strerror (errno)<<", exiting"<<endl;
exit(1);
}
else
- L<<Logger::Error<<"Chrooted to '"<<::arg()["chroot"]<<"'"<<endl;
+ g_log<<Logger::Error<<"Chrooted to '"<<::arg()["chroot"]<<"'"<<endl;
}
s_pidfname=::arg()["socket-dir"]+"/"+s_programname+".pid";
int port = ::arg().asNum("udp-source-port-min");
if(port < 1024 || port > 65535){
- L<<Logger::Error<<"Unable to launch, udp-source-port-min is not a valid port number"<<endl;
+ g_log<<Logger::Error<<"Unable to launch, udp-source-port-min is not a valid port number"<<endl;
exit(99); // this isn't going to fix itself either
}
s_minUdpSourcePort = port;
port = ::arg().asNum("udp-source-port-max");
if(port < 1024 || port > 65535 || port < s_minUdpSourcePort){
- L<<Logger::Error<<"Unable to launch, udp-source-port-max is not a valid port number or is smaller than udp-source-port-min"<<endl;
+ g_log<<Logger::Error<<"Unable to launch, udp-source-port-max is not a valid port number or is smaller than udp-source-port-min"<<endl;
exit(99); // this isn't going to fix itself either
}
s_maxUdpSourcePort = port;
{
port = std::stoi(part);
if(port < 1024 || port > 65535){
- L<<Logger::Error<<"Unable to launch, udp-source-port-avoid contains an invalid port number: "<<part<<endl;
+ g_log<<Logger::Error<<"Unable to launch, udp-source-port-avoid contains an invalid port number: "<<part<<endl;
exit(99); // this isn't going to fix itself either
}
s_avoidUdpSourcePorts.insert(port);
const auto cpusMap = parseCPUMap();
if(g_numThreads == 1) {
- L<<Logger::Warning<<"Operating unthreaded"<<endl;
+ g_log<<Logger::Warning<<"Operating unthreaded"<<endl;
#ifdef HAVE_SYSTEMD
sd_notify(0, "READY=1");
#endif
}
else {
pthread_t tid;
- L<<Logger::Warning<<"Launching "<< g_numThreads <<" threads"<<endl;
+ g_log<<Logger::Warning<<"Launching "<< g_numThreads <<" threads"<<endl;
for(unsigned int n=0; n < g_numThreads; ++n) {
pthread_create(&tid, 0, recursorThread, (void*)(long)n);
#ifdef HAVE_PROTOBUF
t_uuidGenerator = std::unique_ptr<boost::uuids::random_generator>(new boost::uuids::random_generator());
#endif
- L<<Logger::Warning<<"Done priming cache with root hints"<<endl;
+ g_log<<Logger::Warning<<"Done priming cache with root hints"<<endl;
try {
if(!::arg()["lua-dns-script"].empty()) {
t_pdl = std::make_shared<RecursorLua4>();
t_pdl->loadFile(::arg()["lua-dns-script"]);
- L<<Logger::Warning<<"Loaded 'lua' script from '"<<::arg()["lua-dns-script"]<<"'"<<endl;
+ g_log<<Logger::Warning<<"Loaded 'lua' script from '"<<::arg()["lua-dns-script"]<<"'"<<endl;
}
}
catch(std::exception &e) {
- L<<Logger::Error<<"Failed to load 'lua' script from '"<<::arg()["lua-dns-script"]<<"': "<<e.what()<<endl;
+ g_log<<Logger::Error<<"Failed to load 'lua' script from '"<<::arg()["lua-dns-script"]<<"': "<<e.what()<<endl;
_exit(99);
}
t_fdm=getMultiplexer();
if(!t_id) {
if(::arg().mustDo("webserver")) {
- L<<Logger::Warning << "Enabling web server" << endl;
+ g_log<<Logger::Warning << "Enabling web server" << endl;
try {
new RecursorWebServer(t_fdm);
}
catch(PDNSException &e) {
- L<<Logger::Error<<"Exception: "<<e.reason<<endl;
+ g_log<<Logger::Error<<"Exception: "<<e.reason<<endl;
exit(99);
}
}
- L<<Logger::Error<<"Enabled '"<< t_fdm->getName() << "' multiplexer"<<endl;
+ g_log<<Logger::Error<<"Enabled '"<< t_fdm->getName() << "' multiplexer"<<endl;
}
t_fdm->addReadFD(g_pipes[t_id].readToThread, handlePipeRequest);
for(expired_t::iterator i=expired.begin() ; i != expired.end(); ++i) {
shared_ptr<TCPConnection> conn=any_cast<shared_ptr<TCPConnection> >(i->second);
if(g_logCommonErrors)
- L<<Logger::Warning<<"Timeout from remote TCP client "<< conn->d_remote.toStringWithPort() <<endl;
+ g_log<<Logger::Warning<<"Timeout from remote TCP client "<< conn->d_remote.toStringWithPort() <<endl;
t_fdm->removeReadFD(i->first);
}
}
}
}
catch(PDNSException &ae) {
- L<<Logger::Error<<"Exception: "<<ae.reason<<endl;
+ g_log<<Logger::Error<<"Exception: "<<ae.reason<<endl;
return 0;
}
catch(std::exception &e) {
- L<<Logger::Error<<"STL Exception: "<<e.what()<<endl;
+ g_log<<Logger::Error<<"STL Exception: "<<e.what()<<endl;
return 0;
}
catch(...) {
- L<<Logger::Error<<"any other exception in main: "<<endl;
+ g_log<<Logger::Error<<"any other exception in main: "<<endl;
return 0;
}
::arg().setCmd("help","Provide a helpful message");
::arg().setCmd("version","Print version string");
::arg().setCmd("config","Output blank configuration");
- L.toConsole(Logger::Info);
+ g_log.toConsole(Logger::Info);
::arg().laxParse(argc,argv); // do a lax parse
string configname=::arg()["config-dir"]+"/recursor.conf";
}
if(!::arg().file(configname.c_str()))
- L<<Logger::Warning<<"Unable to parse configuration file '"<<configname<<"'"<<endl;
+ g_log<<Logger::Warning<<"Unable to parse configuration file '"<<configname<<"'"<<endl;
::arg().parse(argc,argv);
if( !::arg()["chroot"].empty() && !::arg()["api-config-dir"].empty() && !::arg().mustDo("api-readonly") ) {
- L<<Logger::Error<<"Using chroot and a writable API is not possible"<<endl;
+ g_log<<Logger::Error<<"Using chroot and a writable API is not possible"<<endl;
exit(EXIT_FAILURE);
}
if(!g_quiet && logUrgency < Logger::Info) { // Logger::Info=6, Logger::Debug=7
logUrgency = Logger::Info; // if you do --quiet=no, you need Info to also see the query log
}
- L.setLoglevel(logUrgency);
- L.toConsole(logUrgency);
+ g_log.setLoglevel(logUrgency);
+ g_log.toConsole(logUrgency);
serviceMain(argc, argv);
}
catch(PDNSException &ae) {
- L<<Logger::Error<<"Exception: "<<ae.reason<<endl;
+ g_log<<Logger::Error<<"Exception: "<<ae.reason<<endl;
ret=EXIT_FAILURE;
}
catch(std::exception &e) {
- L<<Logger::Error<<"STL Exception: "<<e.what()<<endl;
+ g_log<<Logger::Error<<"STL Exception: "<<e.what()<<endl;
ret=EXIT_FAILURE;
}
catch(...) {
- L<<Logger::Error<<"any other exception in main: "<<endl;
+ g_log<<Logger::Error<<"any other exception in main: "<<endl;
ret=EXIT_FAILURE;
}
::arg().set("max-signature-cache-entries", "Maximum number of signatures cache entries")="";
::arg().laxFile(configname.c_str());
- L.toConsole(Logger::Error); // so we print any errors
+ g_log.toConsole(Logger::Error); // so we print any errors
BackendMakers().launch(::arg()["launch"]); // vrooooom!
if(::arg().asNum("loglevel") >= 3) // so you can't kill our errors
- L.toConsole((Logger::Urgency)::arg().asNum("loglevel"));
+ g_log.toConsole((Logger::Urgency)::arg().asNum("loglevel"));
//cerr<<"Backend: "<<::arg()["launch"]<<", '" << ::arg()["gmysql-dbname"] <<"'" <<endl;
void logError(const std::string& operation) const {
if (d_err) {
std::string msg = boost::str( boost::format("PKCS#11 operation %s failed: %s (0x%X)") % operation % p11_kit_strerror(d_err) % d_err );
- L<<Logger::Error<< msg << endl;
+ g_log<<Logger::Error<< msg << endl;
}
}
void logError(const std::string& operation) const {
if (d_err) {
std::string msg = boost::str( boost::format("PKCS#11 operation %s failed: %s (0x%X)") % operation % p11_kit_strerror(d_err) % d_err );
- L<<Logger::Error<< msg << endl;
+ g_log<<Logger::Error<< msg << endl;
}
}
attr.push_back(P11KitAttribute(CKA_LABEL, d_label));
FindObjects2(attr, key, 1);
if (key.size() == 0) {
- L<<Logger::Warning<<"Cannot load PCKS#11 private key "<<d_label<<std::endl;;
+ g_log<<Logger::Warning<<"Cannot load PKCS#11 private key "<<d_label<<std::endl;
return;
}
d_private_key = key[0];
attr.push_back(P11KitAttribute(CKA_LABEL, d_pub_label));
FindObjects2(attr, key, 1);
if (key.size() == 0) {
- L<<Logger::Warning<<"Cannot load PCKS#11 public key "<<d_pub_label<<std::endl;
+ g_log<<Logger::Warning<<"Cannot load PKCS#11 public key "<<d_pub_label<<std::endl;
return;
}
d_public_key = key[0];
// this is required by certain tokens, otherwise C_GetSlotInfo will not return a token
err = functions->C_GetSlotList(CK_FALSE, NULL_PTR, &slots);
if (err) {
- L<<Logger::Warning<<"C_GetSlotList(CK_FALSE, NULL_PTR, &slots) = " << err << std::endl;
+ g_log<<Logger::Warning<<"C_GetSlotList(CK_FALSE, NULL_PTR, &slots) = " << err << std::endl;
return err;
}
std::vector<CK_SLOT_ID> slotIds(slots);
err = functions->C_GetSlotList(CK_FALSE, slotIds.data(), &slots);
if (err) {
- L<<Logger::Warning<<"C_GetSlotList(CK_FALSE, slotIds, &slots) = " << err << std::endl;
+ g_log<<Logger::Warning<<"C_GetSlotList(CK_FALSE, slotIds, &slots) = " << err << std::endl;
return err;
}
if (slotId == static_cast<CK_SLOT_ID>(-1))
continue;
if ((err = functions->C_GetSlotInfo(slotId, info))) {
- L<<Logger::Warning<<"C_GetSlotList("<<slotId<<", info) = " << err << std::endl;
+ g_log<<Logger::Warning<<"C_GetSlotList("<<slotId<<", info) = " << err << std::endl;
return err;
}
if ((err = functions->C_GetTokenInfo(slotId, &tinfo))) {
- L<<Logger::Warning<<"C_GetSlotList("<<slotId<<", &tinfo) = " << err << std::endl;
+ g_log<<Logger::Warning<<"C_GetSlotList("<<slotId<<", &tinfo) = " << err << std::endl;
return err;
}
std::string slotName;
try {
slotId = std::stoi(tokenId);
if ((err = functions->C_GetSlotInfo(slotId, info))) {
- L<<Logger::Warning<<"C_GetSlotList("<<slotId<<", info) = " << err << std::endl;
+ g_log<<Logger::Warning<<"C_GetSlotList("<<slotId<<", info) = " << err << std::endl;
return err;
}
- L<<Logger::Warning<<"Specifying PKCS#11 token by SLOT ID is deprecated and should not be used"<<std::endl;
+ g_log<<Logger::Warning<<"Specifying PKCS#11 token by SLOT ID is deprecated and should not be used"<<std::endl;
return 0;
} catch (...) {
return CKR_SLOT_ID_INVALID;
throw PDNSException("Not logged in to token");
if (d_slot->Digest(msg, result, &mech)) {
- L<<Logger::Error<<"Could not digest using PKCS#11 token - using software workaround"<<endl;
+ g_log<<Logger::Error<<"Could not digest using PKCS#11 token - using software workaround"<<endl;
// FINE! I'll do this myself, then, shall I?
switch(d_algorithm) {
case 5: {
void seedRandom(const string& source)
{
- L<<Logger::Warning<<"Reading random entropy from '"<<source<<"'"<<endl;
+ g_log<<Logger::Warning<<"Reading random entropy from '"<<source<<"'"<<endl;
int fd=open(source.c_str(), O_RDONLY);
if(fd < 0) {
- L<<Logger::Error<<"Unable to open source of random '"<<source<<"': "<<stringerror()<<endl;
+ g_log<<Logger::Error<<"Unable to open source of random '"<<source<<"': "<<stringerror()<<endl;
exit(EXIT_FAILURE);
}
char seed[16];
while(pos!=sizeof(seed)) {
ret = read(fd, seed+pos, sizeof(seed)-pos);
if(ret < 0) {
- L<<Logger::Error<<"Unable to read random seed from "<<source<<": "<<stringerror()<<endl;
+ g_log<<Logger::Error<<"Unable to read random seed from "<<source<<": "<<stringerror()<<endl;
close(fd);
exit(EXIT_FAILURE);
}
if(!ret) {
- L<<Logger::Error<<"Unable to read random seed from "<<source<<": end of file"<<endl;
+ g_log<<Logger::Error<<"Unable to read random seed from "<<source<<": end of file"<<endl;
close(fd);
exit(EXIT_FAILURE);
}
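
Note (annotation, not part of the patch): the loop above reads the seed in pieces because read() may return fewer bytes than requested. Below is a minimal sketch of a full-read helper along the same lines; readFully() is a hypothetical name, not a PowerDNS function, and the EINTR handling is added here as an assumption.

#include <unistd.h>
#include <cerrno>
#include <cstddef>

static bool readFully(int fd, char* buf, size_t len)
{
  size_t pos = 0;
  while (pos != len) {
    ssize_t ret = read(fd, buf + pos, len - pos);
    if (ret < 0) {
      if (errno == EINTR)
        continue;               // interrupted by a signal, retry
      return false;             // genuine read error
    }
    if (ret == 0)
      return false;             // end of file before the buffer was filled
    pos += static_cast<size_t>(ret);
  }
  return true;
}
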
int ret=asendtcp(msg, &s); // this will actually do the right thing waiting on the connect
if(ret < 0)
- L<<Logger::Warning<<"Error writing carbon data to "<<remote.toStringWithPort()<<": "<<strerror(errno)<<endl;
+ g_log<<Logger::Warning<<"Error writing carbon data to "<<remote.toStringWithPort()<<": "<<strerror(errno)<<endl;
if(ret==0)
- L<<Logger::Warning<<"Timeout connecting/writing carbon data to "<<remote.toStringWithPort()<<endl;
+ g_log<<Logger::Warning<<"Timeout connecting/writing carbon data to "<<remote.toStringWithPort()<<endl;
}
}
catch(PDNSException& e)
{
- L<<Logger::Error<<"Error in carbon thread: "<<e.reason<<endl;
+ g_log<<Logger::Error<<"Error in carbon thread: "<<e.reason<<endl;
}
catch(std::exception& e)
{
- L<<Logger::Error<<"Error in carbon thread: "<<e.what()<<endl;
+ g_log<<Logger::Error<<"Error in carbon thread: "<<e.what()<<endl;
}
catch(...)
{
- L<<Logger::Error<<"Unknown error in carbon thread"<<endl;
+ g_log<<Logger::Error<<"Unknown error in carbon thread"<<endl;
}
zone->reserve(zoneSizeHint);
}
}
- theL()<<Logger::Warning<<"Loading RPZ from file '"<<filename<<"'"<<endl;
+ g_log<<Logger::Warning<<"Loading RPZ from file '"<<filename<<"'"<<endl;
zone->setName(polName);
loadRPZFromFile(filename, zone, defpol, maxTTL);
lci.dfe.addZone(zone);
- theL()<<Logger::Warning<<"Done loading RPZ from file '"<<filename<<"'"<<endl;
+ g_log<<Logger::Warning<<"Done loading RPZ from file '"<<filename<<"'"<<endl;
}
catch(const std::exception& e) {
- theL()<<Logger::Error<<"Unable to load RPZ zone from '"<<filename<<"': "<<e.what()<<endl;
+ g_log<<Logger::Error<<"Unable to load RPZ zone from '"<<filename<<"': "<<e.what()<<endl;
}
});
zoneIdx = lci.dfe.addZone(zone);
}
catch(const std::exception& e) {
- theL()<<Logger::Error<<"Problem configuring 'rpzMaster': "<<e.what()<<endl;
+ g_log<<Logger::Error<<"Problem configuring 'rpzMaster': "<<e.what()<<endl;
exit(1); // FIXME proper exit code?
}
catch(const PDNSException& e) {
- theL()<<Logger::Error<<"Problem configuring 'rpzMaster': "<<e.reason<<endl;
+ g_log<<Logger::Error<<"Problem configuring 'rpzMaster': "<<e.reason<<endl;
exit(1); // FIXME proper exit code?
}
}
}
catch(const std::exception& e) {
- theL()<<Logger::Error<<"Problem starting RPZIXFRTracker thread: "<<e.what()<<endl;
+ g_log<<Logger::Error<<"Problem starting RPZIXFRTracker thread: "<<e.what()<<endl;
exit(1); // FIXME proper exit code?
}
catch(const PDNSException& e) {
- theL()<<Logger::Error<<"Problem starting RPZIXFRTracker thread: "<<e.reason<<endl;
+ g_log<<Logger::Error<<"Problem starting RPZIXFRTracker thread: "<<e.reason<<endl;
exit(1); // FIXME proper exit code?
}
});
}
}
catch(std::exception& e) {
- theL()<<Logger::Error<<"Error in addSortList: "<<e.what()<<endl;
+ g_log<<Logger::Error<<"Error in addSortList: "<<e.what()<<endl;
}
});
}
}
else {
- theL()<<Logger::Error<<"Only one protobuf server can be configured, we already have "<<lci.protobufServer->toString()<<endl;
+ g_log<<Logger::Error<<"Only one protobuf server can be configured, we already have "<<lci.protobufServer->toString()<<endl;
}
}
catch(std::exception& e) {
- theL()<<Logger::Error<<"Error while starting protobuf logger to '"<<server_<<": "<<e.what()<<endl;
+ g_log<<Logger::Error<<"Error while starting protobuf logger to '"<<server_<<": "<<e.what()<<endl;
}
catch(PDNSException& e) {
- theL()<<Logger::Error<<"Error while starting protobuf logger to '"<<server_<<": "<<e.reason<<endl;
+ g_log<<Logger::Error<<"Error while starting protobuf logger to '"<<server_<<": "<<e.reason<<endl;
}
});
}
}
else {
- theL()<<Logger::Error<<"Only one protobuf server can be configured, we already have "<<lci.protobufServer->toString()<<endl;
+ g_log<<Logger::Error<<"Only one protobuf server can be configured, we already have "<<lci.protobufServer->toString()<<endl;
}
}
catch(std::exception& e) {
- theL()<<Logger::Error<<"Error while starting protobuf logger to '"<<server_<<": "<<e.what()<<endl;
+ g_log<<Logger::Error<<"Error while starting protobuf logger to '"<<server_<<": "<<e.what()<<endl;
}
catch(PDNSException& e) {
- theL()<<Logger::Error<<"Error while starting protobuf logger to '"<<server_<<": "<<e.reason<<endl;
+ g_log<<Logger::Error<<"Error while starting protobuf logger to '"<<server_<<": "<<e.reason<<endl;
}
});
#endif
g_luaconfs.setState(lci);
}
catch(const LuaContext::ExecutionErrorException& e) {
- theL()<<Logger::Error<<"Unable to load Lua script from '"+fname+"': ";
+ g_log<<Logger::Error<<"Unable to load Lua script from '"+fname+"': ";
try {
std::rethrow_if_nested(e);
} catch(const std::exception& exp) {
// exp is the exception that was thrown from inside the lambda
- theL() << exp.what() << std::endl;
+ g_log << exp.what() << std::endl;
}
catch(const PDNSException& exp) {
// exp is the exception that was thrown from inside the lambda
- theL() << exp.reason << std::endl;
+ g_log << exp.reason << std::endl;
}
throw;
}
catch(std::exception& err) {
- theL()<<Logger::Error<<"Unable to load Lua script from '"+fname+"': "<<err.what()<<endl;
+ g_log<<Logger::Error<<"Unable to load Lua script from '"+fname+"': "<<err.what()<<endl;
throw;
}
static void registerCounter64Stat(const char* name, const oid statOID[], size_t statOIDLength)
{
if (statOIDLength != OID_LENGTH(questionsOID)) {
- L<<Logger::Error<<"Invalid OID for SNMP Counter64 statistic "<<std::string(name)<<endl;
+ g_log<<Logger::Error<<"Invalid OID for SNMP Counter64 statistic "<<std::string(name)<<endl;
return;
}
if (s_statsMap.find(statOID[statOIDLength - 1]) != s_statsMap.end()) {
- L<<Logger::Error<<"OID for SNMP Counter64 statistic "<<std::string(name)<<" has already been registered"<<endl;
+ g_log<<Logger::Error<<"OID for SNMP Counter64 statistic "<<std::string(name)<<" has already been registered"<<endl;
return;
}
if (pdns_iequals(*begin, "on") || pdns_iequals(*begin, "yes")) {
if (!g_dnssecLogBogus) {
- L<<Logger::Warning<<"Enabling DNSSEC Bogus logging, requested via control channel"<<endl;
+ g_log<<Logger::Warning<<"Enabling DNSSEC Bogus logging, requested via control channel"<<endl;
g_dnssecLogBogus = true;
return "DNSSEC Bogus logging enabled\n";
}
if (pdns_iequals(*begin, "off") || pdns_iequals(*begin, "no")) {
if (g_dnssecLogBogus) {
- L<<Logger::Warning<<"Disabling DNSSEC Bogus logging, requested via control channel"<<endl;
+ g_log<<Logger::Warning<<"Disabling DNSSEC Bogus logging, requested via control channel"<<endl;
g_dnssecLogBogus = false;
return "DNSSEC Bogus logging disabled\n";
}
if (begin != end)
why += " ";
}
- L<<Logger::Warning<<"Adding Negative Trust Anchor for "<<who<<" with reason '"<<why<<"', requested via control channel"<<endl;
+ g_log<<Logger::Warning<<"Adding Negative Trust Anchor for "<<who<<" with reason '"<<why<<"', requested via control channel"<<endl;
g_luaconfs.modify([who, why](LuaConfigItems& lci) {
lci.negAnchors[who] = why;
});
return "No Negative Trust Anchor specified, doing nothing.\n";
if (begin + 1 == end && *begin == "*"){
- L<<Logger::Warning<<"Clearing all Negative Trust Anchors, requested via control channel"<<endl;
+ g_log<<Logger::Warning<<"Clearing all Negative Trust Anchors, requested via control channel"<<endl;
g_luaconfs.modify([](LuaConfigItems& lci) {
lci.negAnchors.clear();
});
string removed("");
bool first(true);
for (auto const &entry : toRemove) {
- L<<Logger::Warning<<"Clearing Negative Trust Anchor for "<<entry<<", requested via control channel"<<endl;
+ g_log<<Logger::Warning<<"Clearing Negative Trust Anchor for "<<entry<<", requested via control channel"<<endl;
g_luaconfs.modify([entry](LuaConfigItems& lci) {
lci.negAnchors.erase(entry);
});
}
try {
- L<<Logger::Warning<<"Adding Trust Anchor for "<<who<<" with data '"<<what<<"', requested via control channel";
+ g_log<<Logger::Warning<<"Adding Trust Anchor for "<<who<<" with data '"<<what<<"', requested via control channel";
g_luaconfs.modify([who, what](LuaConfigItems& lci) {
auto ds = unique_ptr<DSRecordContent>(dynamic_cast<DSRecordContent*>(DSRecordContent::make(what)));
lci.dsAnchors[who].insert(*ds);
});
broadcastAccFunction<uint64_t>(boost::bind(pleaseWipePacketCache, who, true));
- L<<Logger::Warning<<endl;
+ g_log<<Logger::Warning<<endl;
return "Added Trust Anchor for " + who.toStringRootDot() + " with data " + what + "\n";
}
catch(std::exception &e) {
- L<<Logger::Warning<<", failed: "<<e.what()<<endl;
+ g_log<<Logger::Warning<<", failed: "<<e.what()<<endl;
return "Unable to add Trust Anchor for " + who.toStringRootDot() + ": " + e.what() + "\n";
}
}
string removed("");
bool first(true);
for (auto const &entry : toRemove) {
- L<<Logger::Warning<<"Removing Trust Anchor for "<<entry<<", requested via control channel"<<endl;
+ g_log<<Logger::Warning<<"Removing Trust Anchor for "<<entry<<", requested via control channel"<<endl;
g_luaconfs.modify([entry](LuaConfigItems& lci) {
lci.dsAnchors.erase(entry);
});
static void doExitGeneric(bool nicely)
{
- L<<Logger::Error<<"Exiting on user request"<<endl;
+ g_log<<Logger::Error<<"Exiting on user request"<<endl;
extern RecursorControlChannel s_rcc;
s_rcc.~RecursorControlChannel();
try {
loadRecursorLuaConfig(::arg()["lua-config-file"], false);
- L<<Logger::Warning<<"Reloaded Lua configuration file '"<<::arg()["lua-config-file"]<<"', requested via control channel"<<endl;
+ g_log<<Logger::Warning<<"Reloaded Lua configuration file '"<<::arg()["lua-config-file"]<<"', requested via control channel"<<endl;
return "Reloaded Lua configuration file '"+::arg()["lua-config-file"]+"'\n";
}
catch(std::exception& e) {
if(cmd=="reload-acls") {
if(!::arg()["chroot"].empty()) {
- L<<Logger::Error<<"Unable to reload ACL when chroot()'ed, requested via control channel"<<endl;
+ g_log<<Logger::Error<<"Unable to reload ACL when chroot()'ed, requested via control channel"<<endl;
return "Unable to reload ACL when chroot()'ed, please restart\n";
}
}
catch(std::exception& e)
{
- L<<Logger::Error<<"Reloading ACLs failed (Exception: "<<e.what()<<")"<<endl;
+ g_log<<Logger::Error<<"Reloading ACLs failed (Exception: "<<e.what()<<")"<<endl;
return e.what() + string("\n");
}
catch(PDNSException& ae)
{
- L<<Logger::Error<<"Reloading ACLs failed (PDNSException: "<<ae.reason<<")"<<endl;
+ g_log<<Logger::Error<<"Reloading ACLs failed (PDNSException: "<<ae.reason<<")"<<endl;
return ae.reason + string("\n");
}
return "ok\n";
if(cmd=="reload-zones") {
if(!::arg()["chroot"].empty()) {
- L<<Logger::Error<<"Unable to reload zones and forwards when chroot()'ed, requested via control channel"<<endl;
+ g_log<<Logger::Error<<"Unable to reload zones and forwards when chroot()'ed, requested via control channel"<<endl;
return "Unable to reload zones and forwards when chroot()'ed, please restart\n";
}
return reloadAuthAndForwards();
// start (sys)logging
-/** \var Logger L
-\brief All logging is done via L, a Logger instance
-*/
-
/**
\file receiver.cc
int i=open("/dev/null",O_RDWR); /* open stdin */
if(i < 0)
- L<<Logger::Critical<<"Unable to open /dev/null: "<<stringerror()<<endl;
+ g_log<<Logger::Critical<<"Unable to open /dev/null: "<<stringerror()<<endl;
else {
dup2(i,0); /* stdin */
dup2(i,1); /* stdout */
static void takedown(int i)
{
if(cpid) {
- L<<Logger::Error<<"Guardian is killed, taking down children with us"<<endl;
+ g_log<<Logger::Error<<"Guardian is killed, taking down children with us"<<endl;
kill(cpid,SIGKILL);
exit(0);
}
if(of)
of<<getpid()<<endl;
else
- L<<Logger::Error<<"Writing pid for "<<getpid()<<" to "<<fname<<" failed: "<<strerror(errno)<<endl;
+ g_log<<Logger::Error<<"Writing pid for "<<getpid()<<" to "<<fname<<" failed: "<<strerror(errno)<<endl;
}
int g_fd1[2], g_fd2[2];
setStatus("Launching child");
if(pipe(g_fd1)<0 || pipe(g_fd2)<0) {
- L<<Logger::Critical<<"Unable to open pipe for coprocess: "<<strerror(errno)<<endl;
+ g_log<<Logger::Critical<<"Unable to open pipe for coprocess: "<<strerror(errno)<<endl;
exit(1);
}
if(!(g_fp=fdopen(g_fd2[0],"r"))) {
- L<<Logger::Critical<<"Unable to associate a file pointer with pipe: "<<stringerror()<<endl;
+ g_log<<Logger::Critical<<"Unable to associate a file pointer with pipe: "<<stringerror()<<endl;
exit(1);
}
setbuf(g_fp,0); // no buffering please, confuses select
if(::arg()["config-name"]!="") {
progname+="-"+::arg()["config-name"];
- L<<Logger::Error<<"Virtual configuration name: "<<::arg()["config-name"]<<endl;
+ g_log<<Logger::Error<<"Virtual configuration name: "<<::arg()["config-name"]<<endl;
}
newargv[0]=strdup(const_cast<char *>((progname+"-instance").c_str()));
}
newargv[n]=0;
- L<<Logger::Error<<"Guardian is launching an instance"<<endl;
+ g_log<<Logger::Error<<"Guardian is launching an instance"<<endl;
close(g_fd1[1]);
fclose(g_fp); // this closes g_fd2[0] for us
close(g_fd2[1]);
}
if(execvp(argv[0], newargv)<0) {
- L<<Logger::Error<<"Unable to execvp '"<<argv[0]<<"': "<<strerror(errno)<<endl;
+ g_log<<Logger::Error<<"Unable to execvp '"<<argv[0]<<"': "<<strerror(errno)<<endl;
char **p=newargv;
while(*p)
- L<<Logger::Error<<*p++<<endl;
+ g_log<<Logger::Error<<*p++<<endl;
exit(1);
}
- L<<Logger::Error<<"execvp returned!!"<<endl;
+ g_log<<Logger::Error<<"execvp returned!!"<<endl;
// never reached
}
else if(pid>0) { // parent
int ret=waitpid(pid,&status,WNOHANG);
if(ret<0) {
- L<<Logger::Error<<"In guardian loop, waitpid returned error: "<<strerror(errno)<<endl;
- L<<Logger::Error<<"Dying"<<endl;
+ g_log<<Logger::Error<<"In guardian loop, waitpid returned error: "<<strerror(errno)<<endl;
+ g_log<<Logger::Error<<"Dying"<<endl;
exit(1);
}
else if(ret) // something exited
int ret=WEXITSTATUS(status);
if(ret==99) {
- L<<Logger::Error<<"Child requested a stop, exiting"<<endl;
+ g_log<<Logger::Error<<"Child requested a stop, exiting"<<endl;
exit(1);
}
setStatus("Child died with code "+itoa(ret));
- L<<Logger::Error<<"Our pdns instance exited with code "<<ret<<", respawning"<<endl;
+ g_log<<Logger::Error<<"Our pdns instance exited with code "<<ret<<", respawning"<<endl;
sleep(1);
continue;
if(WIFSIGNALED(status)) {
int sig=WTERMSIG(status);
setStatus("Child died because of signal "+itoa(sig));
- L<<Logger::Error<<"Our pdns instance ("<<pid<<") exited after signal "<<sig<<endl;
+ g_log<<Logger::Error<<"Our pdns instance ("<<pid<<") exited after signal "<<sig<<endl;
#ifdef WCOREDUMP
if(WCOREDUMP(status))
- L<<Logger::Error<<"Dumped core"<<endl;
+ g_log<<Logger::Error<<"Dumped core"<<endl;
#endif
- L<<Logger::Error<<"Respawning"<<endl;
+ g_log<<Logger::Error<<"Respawning"<<endl;
sleep(1);
continue;
}
- L<<Logger::Error<<"No clue what happened! Respawning"<<endl;
+ g_log<<Logger::Error<<"No clue what happened! Respawning"<<endl;
}
else {
- L<<Logger::Error<<"Unable to fork: "<<strerror(errno)<<endl;
+ g_log<<Logger::Error<<"Unable to fork: "<<strerror(errno)<<endl;
exit(1);
}
}
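
Note (annotation, not part of the patch): the guardian above is a fork/exec/waitpid supervisor that respawns the real server unless the child exits with the special code 99. A simplified sketch of that pattern follows; respawnLoop() is a hypothetical name, and it blocks in waitpid() instead of polling with WNOHANG as the code above does.

#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include <cstdio>

static void respawnLoop(char* const argv[])
{
  for (;;) {
    pid_t pid = fork();
    if (pid == 0) {                 // child: become the real server
      execvp(argv[0], argv);
      _exit(1);                     // only reached if execvp() failed
    }
    if (pid < 0) {                  // fork failed, give up
      perror("fork");
      return;
    }
    int status = 0;
    if (waitpid(pid, &status, 0) < 0) {
      perror("waitpid");
      return;
    }
    if (WIFEXITED(status) && WEXITSTATUS(status) == 99)
      return;                       // child requested a stop, mirroring the exit code checked above
    sleep(1);                       // brief pause, then respawn
  }
}
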
res=UeberBackend::loadmodule(::arg()["module-dir"]+"/"+module);
if(res==false) {
- L<<Logger::Error<<"Receiver unable to load module "<<module<<endl;
+ g_log<<Logger::Error<<"Receiver unable to load module "<<module<<endl;
exit(1);
}
}
#include <execinfo.h>
static void tbhandler(int num)
{
- L<<Logger::Critical<<"Got a signal "<<num<<", attempting to print trace: "<<endl;
+ g_log<<Logger::Critical<<"Got a signal "<<num<<", attempting to print trace: "<<endl;
void *array[20]; //only care about last 17 functions (3 taken with tracing support)
size_t size;
char **strings;
strings = backtrace_symbols (array, size); //Need -rdynamic gcc (linker) flag for this to work
for (i = 0; i < size; i++) //skip useless functions
- L<<Logger::Error<<strings[i]<<endl;
+ g_log<<Logger::Error<<strings[i]<<endl;
signal(SIGABRT, SIG_DFL);
std::ios_base::sync_with_stdio(false);
- L.toConsole(Logger::Warning);
+ g_log.toConsole(Logger::Warning);
try {
declareArguments();
UNIX_declareArguments();
if(::arg()["config-name"]!="")
s_programname+="-"+::arg()["config-name"];
- (void)theL(s_programname);
+ g_log.setName(s_programname);
string configname=::arg()["config-dir"]+"/"+s_programname+".conf";
cleanSlashes(configname);
if(!::arg()["logging-facility"].empty()) {
int val=logFacilityToLOG(::arg().asNum("logging-facility") );
if(val >= 0)
- theL().setFacility(val);
+ g_log.setFacility(val);
else
- L<<Logger::Error<<"Unknown logging facility "<<::arg().asNum("logging-facility") <<endl;
+ g_log<<Logger::Error<<"Unknown logging facility "<<::arg().asNum("logging-facility") <<endl;
}
- L.setLoglevel((Logger::Urgency)(::arg().asNum("loglevel")));
- L.disableSyslog(::arg().mustDo("disable-syslog"));
- L.setTimestamps(::arg().mustDo("log-timestamp"));
- L.toConsole((Logger::Urgency)(::arg().asNum("loglevel")));
+ g_log.setLoglevel((Logger::Urgency)(::arg().asNum("loglevel")));
+ g_log.disableSyslog(::arg().mustDo("disable-syslog"));
+ g_log.setTimestamps(::arg().mustDo("log-timestamp"));
+ g_log.toConsole((Logger::Urgency)(::arg().asNum("loglevel")));
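Note (annotation, not part of the patch): for reference, the logger setup performed by the lines above, gathered into one place. Only calls that already appear in this diff are used; the configureLogging() wrapper and the quoted include paths are assumptions.

#include "logger.hh"      // assumed location of the Logger class and g_log
#include "arguments.hh"   // assumed location of ::arg()
#include <ostream>
using std::endl;          // the diff's own code gets this via namespaces.hh

static void configureLogging()
{
  g_log.setLoglevel((Logger::Urgency)(::arg().asNum("loglevel")));
  g_log.toConsole((Logger::Urgency)(::arg().asNum("loglevel")));
  g_log.disableSyslog(::arg().mustDo("disable-syslog"));
  g_log.setTimestamps(::arg().mustDo("log-timestamp"));
  g_log << Logger::Warning << "logging configured" << endl;  // stream usage is unchanged by the rename
}
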
if(::arg().mustDo("help") || ::arg().mustDo("config")) {
::arg().set("daemon")="no";
if(::arg().mustDo("guardian") && !isGuarded(argv)) {
if(::arg().mustDo("daemon")) {
- L.toConsole(Logger::Critical);
+ g_log.toConsole(Logger::Critical);
daemonize();
}
guardian(argc, argv);
#ifdef __GLIBC__
if(!::arg().mustDo("traceback-handler")) {
- L<<Logger::Warning<<"Disabling traceback handler"<<endl;
+ g_log<<Logger::Warning<<"Disabling traceback handler"<<endl;
signal(SIGSEGV,SIG_DFL);
signal(SIGFPE,SIG_DFL);
signal(SIGABRT,SIG_DFL);
}
if(!::arg().asNum("local-port")) {
- L<<Logger::Error<<"Unable to launch, binding to no port or port 0 makes no sense"<<endl;
+ g_log<<Logger::Error<<"Unable to launch, binding to no port or port 0 makes no sense"<<endl;
exit(99); // this isn't going to fix itself either
}
if(!BackendMakers().numLauncheable()) {
- L<<Logger::Error<<"Unable to launch, no backends configured for querying"<<endl;
+ g_log<<Logger::Error<<"Unable to launch, no backends configured for querying"<<endl;
exit(99); // this isn't going to fix itself either
}
if(::arg().mustDo("daemon")) {
- L.toConsole(Logger::None);
+ g_log.toConsole(Logger::None);
if(!isGuarded(argv))
daemonize();
}
if(isGuarded(argv)) {
- L<<Logger::Warning<<"This is a guarded instance of pdns"<<endl;
+ g_log<<Logger::Warning<<"This is a guarded instance of pdns"<<endl;
dl=new DynListener; // listens on stdin
}
else {
- L<<Logger::Warning<<"This is a standalone pdns"<<endl;
+ g_log<<Logger::Warning<<"This is a standalone pdns"<<endl;
if(::arg().mustDo("control-console"))
dl=new DynListener();
g_udpReceivers[idx] = std::make_shared<UDPNameserver>(true);
}
catch(const PDNSException& e) {
- L<<Logger::Error<<"Unable to reuse port, falling back to original bind"<<endl;
+ g_log<<Logger::Error<<"Unable to reuse port, falling back to original bind"<<endl;
break;
}
}
TN=new TCPNameserver;
}
catch(const ArgException &A) {
- L<<Logger::Error<<"Fatal error: "<<A.reason<<endl;
+ g_log<<Logger::Error<<"Fatal error: "<<A.reason<<endl;
exit(1);
}
declareStats();
- DLOG(L<<Logger::Warning<<"Verbose logging in effect"<<endl);
+ DLOG(g_log<<Logger::Warning<<"Verbose logging in effect"<<endl);
showProductVersion();
catch(PDNSException &AE) {
if(!::arg().mustDo("daemon"))
cerr<<"Exiting because: "<<AE.reason<<endl;
- L<<Logger::Error<<"Exiting because: "<<AE.reason<<endl;
+ g_log<<Logger::Error<<"Exiting because: "<<AE.reason<<endl;
}
catch(std::exception &e) {
if(!::arg().mustDo("daemon"))
cerr<<"Exiting because of STL error: "<<e.what()<<endl;
- L<<Logger::Error<<"Exiting because of STL error: "<<e.what()<<endl;
+ g_log<<Logger::Error<<"Exiting because of STL error: "<<e.what()<<endl;
}
catch(...) {
cerr<<"Uncaught exception of unknown type - sorry"<<endl;
#include <iostream>
#include "dnsrecords.hh"
#include <boost/utility.hpp>
-#undef L
#include <boost/multi_index_container.hpp>
#include <boost/multi_index/ordered_index.hpp>
#include <boost/multi_index/hashed_index.hpp>
#include "validate.hh"
#undef max
-#define L theL()
#include "namespaces.hh"
using namespace ::boost::multi_index;
static void init(bool debug=false)
{
- L.setName("test");
- L.disableSyslog(true);
+ g_log.setName("test");
+ g_log.disableSyslog(true);
if (debug) {
- L.setLoglevel((Logger::Urgency)(6)); // info and up
- L.toConsole(Logger::Info);
+ g_log.setLoglevel((Logger::Urgency)(6)); // info and up
+ g_log.toConsole(Logger::Info);
}
else {
- L.setLoglevel(Logger::None);
- L.toConsole(Logger::Error);
+ g_log.setLoglevel(Logger::None);
+ g_log.toConsole(Logger::Error);
}
seedRandom("/dev/urandom");
ad.d_records.insert(dr);
if(newMap->count(dr.d_name)) {
- L<<Logger::Warning<<"Hosts file will not overwrite zone '"<<dr.d_name<<"' already loaded"<<endl;
+ g_log<<Logger::Warning<<"Hosts file will not overwrite zone '"<<dr.d_name<<"' already loaded"<<endl;
}
else {
- L<<Logger::Warning<<"Inserting forward zone '"<<dr.d_name<<"' based on hosts file"<<endl;
+ g_log<<Logger::Warning<<"Inserting forward zone '"<<dr.d_name<<"' based on hosts file"<<endl;
ad.d_name=dr.d_name;
(*newMap)[ad.d_name]=ad;
}
}
if(newMap->count(dr.d_name)) {
- L<<Logger::Warning<<"Will not overwrite zone '"<<dr.d_name<<"' already loaded"<<endl;
+ g_log<<Logger::Warning<<"Will not overwrite zone '"<<dr.d_name<<"' already loaded"<<endl;
}
else {
if(ipparts.size()==4)
- L<<Logger::Warning<<"Inserting reverse zone '"<<dr.d_name<<"' based on hosts file"<<endl;
+ g_log<<Logger::Warning<<"Inserting reverse zone '"<<dr.d_name<<"' based on hosts file"<<endl;
ad.d_name = dr.d_name;
(*newMap)[ad.d_name]=ad;
}
for(vector<string>::const_iterator iter = servers.begin(); iter != servers.end(); ++iter) {
if(verbose && iter != servers.begin())
- L<<", ";
+ g_log<<", ";
ComboAddress addr=parseIPAndPort(*iter, 53);
if(verbose)
- L<<addr.toStringWithPort();
+ g_log<<addr.toStringWithPort();
ad.d_servers.push_back(addr);
}
if(verbose)
- L<<endl;
+ g_log<<endl;
}
void* pleaseWipeNegCache()
std::shared_ptr<SyncRes::domainmap_t> original=SyncRes::getDomainMap();
try {
- L<<Logger::Warning<<"Reloading zones, purging data from cache"<<endl;
+ g_log<<Logger::Warning<<"Reloading zones, purging data from cache"<<endl;
if (original) {
for(const auto& i : *original) {
return "ok\n";
}
catch(std::exception& e) {
- L<<Logger::Error<<"Encountered error reloading zones, keeping original data: "<<e.what()<<endl;
+ g_log<<Logger::Error<<"Encountered error reloading zones, keeping original data: "<<e.what()<<endl;
}
catch(PDNSException& ae) {
- L<<Logger::Error<<"Encountered error reloading zones, keeping original data: "<<ae.reason<<endl;
+ g_log<<Logger::Error<<"Encountered error reloading zones, keeping original data: "<<ae.reason<<endl;
}
catch(...) {
- L<<Logger::Error<<"Encountered unknown error reloading zones, keeping original data"<<endl;
+ g_log<<Logger::Error<<"Encountered unknown error reloading zones, keeping original data"<<endl;
}
return "reloading failed, see log\n";
}
// headers.first=toCanonic("", headers.first);
if(n==0) {
ad.d_rdForward = false;
- L<<Logger::Error<<"Parsing authoritative data for zone '"<<headers.first<<"' from file '"<<headers.second<<"'"<<endl;
+ g_log<<Logger::Error<<"Parsing authoritative data for zone '"<<headers.first<<"' from file '"<<headers.second<<"'"<<endl;
ZoneParserTNG zpt(headers.second, DNSName(headers.first));
DNSResourceRecord rr;
DNSRecord dr;
}
}
else {
- L<<Logger::Error<<"Redirecting queries for zone '"<<headers.first<<"' ";
+ g_log<<Logger::Error<<"Redirecting queries for zone '"<<headers.first<<"' ";
if(n == 2) {
- L<<"with recursion ";
+ g_log<<"with recursion ";
ad.d_rdForward = true;
}
else ad.d_rdForward = false;
- L<<"to: ";
+ g_log<<"to: ";
convertServersForAD(headers.second, ad, ";");
if(n == 2) {
}
if(!::arg()["forward-zones-file"].empty()) {
- L<<Logger::Warning<<"Reading zone forwarding information from '"<<::arg()["forward-zones-file"]<<"'"<<endl;
+ g_log<<Logger::Warning<<"Reading zone forwarding information from '"<<::arg()["forward-zones-file"]<<"'"<<endl;
SyncRes::AuthDomain ad;
FILE *rfp=fopen(::arg()["forward-zones-file"].c_str(), "r");
ad.d_name = DNSName(domain);
(*newMap)[ad.d_name]=ad;
}
- L<<Logger::Warning<<"Done parsing " << newMap->size() - before<<" forwarding instructions from file '"<<::arg()["forward-zones-file"]<<"'"<<endl;
+ g_log<<Logger::Warning<<"Done parsing " << newMap->size() - before<<" forwarding instructions from file '"<<::arg()["forward-zones-file"]<<"'"<<endl;
}
if(::arg().mustDo("export-etc-hosts")) {
ifstream ifs(fname.c_str());
if(!ifs) {
- L<<Logger::Warning<<"Could not open /etc/hosts for reading"<<endl;
+ g_log<<Logger::Warning<<"Could not open /etc/hosts for reading"<<endl;
}
else {
string searchSuffix = ::arg()["export-etc-hosts-search-suffix"];
}
}
if(::arg().mustDo("serve-rfc1918")) {
- L<<Logger::Warning<<"Inserting rfc 1918 private space zones"<<endl;
+ g_log<<Logger::Warning<<"Inserting rfc 1918 private space zones"<<endl;
parts.clear();
parts.push_back("127");
makeIPToNamesZone(newMap, parts);
}
catch(const std::exception& e) {
#ifdef WE_ARE_RECURSOR
- L<<Logger::Warning<<"Error connecting to remote logger "<<d_remote.toStringWithPort()<<": "<<e.what()<<std::endl;
+ g_log<<Logger::Warning<<"Error connecting to remote logger "<<d_remote.toStringWithPort()<<": "<<e.what()<<std::endl;
#else
warnlog("Error connecting to remote logger %s: %s", d_remote.toStringWithPort(), e.what());
#endif
}
catch(const std::runtime_error& e) {
#ifdef WE_ARE_RECURSOR
- L<<Logger::Info<<"Error sending data to remote logger "<<d_remote.toStringWithPort()<<": "<< e.what()<<endl;
+ g_log<<Logger::Info<<"Error sending data to remote logger "<<d_remote.toStringWithPort()<<": "<< e.what()<<endl;
#else
vinfolog("Error sending data to remote logger (%s): %s", d_remote.toStringWithPort(), e.what());
#endif
QType rrType = QType(rr->d_type);
if (rrType == QType::NSEC || rrType == QType::NSEC3) {
- L<<Logger::Warning<<msgPrefix<<"Trying to add/update/delete "<<rr->d_name<<"|"<<rrType.getName()<<". These are generated records, ignoring!"<<endl;
+ g_log<<Logger::Warning<<msgPrefix<<"Trying to add/update/delete "<<rr->d_name<<"|"<<rrType.getName()<<". These are generated records, ignoring!"<<endl;
return 0;
}
if (!isPresigned && ((!::arg().mustDo("direct-dnskey") && rrType == QType::DNSKEY) || rrType == QType::RRSIG)) {
- L<<Logger::Warning<<msgPrefix<<"Trying to add/update/delete "<<rr->d_name<<"|"<<rrType.getName()<<" in non-presigned zone, ignoring!"<<endl;
+ g_log<<Logger::Warning<<msgPrefix<<"Trying to add/update/delete "<<rr->d_name<<"|"<<rrType.getName()<<" in non-presigned zone, ignoring!"<<endl;
return 0;
}
if ((rrType == QType::NSEC3PARAM || rrType == QType::DNSKEY) && rr->d_name != di->zone) {
- L<<Logger::Warning<<msgPrefix<<"Trying to add/update/delete "<<rr->d_name<<"|"<<rrType.getName()<<", "<<rrType.getName()<<" must be at zone apex, ignoring!"<<endl;
+ g_log<<Logger::Warning<<msgPrefix<<"Trying to add/update/delete "<<rr->d_name<<"|"<<rrType.getName()<<", "<<rrType.getName()<<" must be at zone apex, ignoring!"<<endl;
return 0;
}
if (rr->d_class == QClass::IN) { // 3.4.2.2 QClass::IN means insert or update
- DLOG(L<<msgPrefix<<"Add/Update record (QClass == IN) "<<rr->d_name<<"|"<<rrType.getName()<<endl);
+ DLOG(g_log<<msgPrefix<<"Add/Update record (QClass == IN) "<<rr->d_name<<"|"<<rrType.getName()<<endl);
if (rrType == QType::NSEC3PARAM) {
- L<<Logger::Notice<<msgPrefix<<"Adding/updating NSEC3PARAM for zone, resetting ordernames."<<endl;
+ g_log<<Logger::Notice<<msgPrefix<<"Adding/updating NSEC3PARAM for zone, resetting ordernames."<<endl;
NSEC3PARAMRecordContent nsec3param(rr->d_content->getZoneRepresentation(), di->zone.toString() /* FIXME400 huh */);
*narrow = false; // adding a NSEC3 will cause narrow mode to be dropped, as you cannot specify that in a NSEC3PARAM record
di->backend->replaceRRSet(di->id, oldRec->qname, oldRec->qtype, rrset);
*updatedSerial = true;
changedRecords++;
- L<<Logger::Notice<<msgPrefix<<"Replacing record "<<rr->d_name<<"|"<<rrType.getName()<<endl;
+ g_log<<Logger::Notice<<msgPrefix<<"Replacing record "<<rr->d_name<<"|"<<rrType.getName()<<endl;
} else {
- L<<Logger::Notice<<msgPrefix<<"Provided serial ("<<sdUpdate.serial<<") is older than the current serial ("<<sdOld.serial<<"), ignoring SOA update."<<endl;
+ g_log<<Logger::Notice<<msgPrefix<<"Provided serial ("<<sdUpdate.serial<<") is older than the current serial ("<<sdOld.serial<<"), ignoring SOA update."<<endl;
}
// It's not possible to have multiple CNAME's with the same NAME. So we always update.
}
if (changedCNames > 0) {
di->backend->replaceRRSet(di->id, rr->d_name, rrType, rrset);
- L<<Logger::Notice<<msgPrefix<<"Replacing record "<<rr->d_name<<"|"<<rrType.getName()<<endl;
+ g_log<<Logger::Notice<<msgPrefix<<"Replacing record "<<rr->d_name<<"|"<<rrType.getName()<<endl;
changedRecords += changedCNames;
} else {
- L<<Logger::Notice<<msgPrefix<<"Replace for record "<<rr->d_name<<"|"<<rrType.getName()<<" requested, but no changes made."<<endl;
+ g_log<<Logger::Notice<<msgPrefix<<"Replace for record "<<rr->d_name<<"|"<<rrType.getName()<<" requested, but no changes made."<<endl;
}
// In any other case, we must check if the TYPE and RDATA match to provide an update (which effectively means a update of TTL)
}
if (updateTTL > 0) {
di->backend->replaceRRSet(di->id, rr->d_name, rrType, rrset);
- L<<Logger::Notice<<msgPrefix<<"Replacing record "<<rr->d_name<<"|"<<rrType.getName()<<endl;
+ g_log<<Logger::Notice<<msgPrefix<<"Replacing record "<<rr->d_name<<"|"<<rrType.getName()<<endl;
changedRecords += updateTTL;
} else {
- L<<Logger::Notice<<msgPrefix<<"Replace for record "<<rr->d_name<<"|"<<rrType.getName()<<" requested, but no changes made."<<endl;
+ g_log<<Logger::Notice<<msgPrefix<<"Replace for record "<<rr->d_name<<"|"<<rrType.getName()<<" requested, but no changes made."<<endl;
}
}
// If we haven't found a record that matches, we must add it.
if (! foundRecord) {
- L<<Logger::Notice<<msgPrefix<<"Adding record "<<rr->d_name<<"|"<<rrType.getName()<<endl;
+ g_log<<Logger::Notice<<msgPrefix<<"Adding record "<<rr->d_name<<"|"<<rrType.getName()<<endl;
delnonterm.insert(rr->d_name); // always remove any ENT's in the place where we're going to add a record.
auto newRec = DNSResourceRecord::fromWire(*rr);
newRec.domain_id = di->id;
// If we insert an NS, all the records below it become non auth - so, we're inserting a delegate.
// Auth can only be false when the rr->d_name is not the zone
if (auth == false && rrType == QType::NS) {
- DLOG(L<<msgPrefix<<"Going to fix auth flags below "<<rr->d_name<<endl);
+ DLOG(g_log<<msgPrefix<<"Going to fix auth flags below "<<rr->d_name<<endl);
insnonterm.clear(); // No ENT's are needed below delegates (auth=0)
vector<DNSName> qnames;
di->backend->listSubZone(rr->d_name, di->id);
// Delete records - section 3.4.2.3 and 3.4.2.4 with the exception of the 'always leave 1 NS rule' as that's handled by
// the code that calls this performUpdate().
if ((rr->d_class == QClass::ANY || rr->d_class == QClass::NONE) && rrType != QType::SOA) { // never delete a SOA.
- DLOG(L<<msgPrefix<<"Deleting records: "<<rr->d_name<<"; QClass:"<<rr->d_class<<"; rrType: "<<rrType.getName()<<endl);
+ DLOG(g_log<<msgPrefix<<"Deleting records: "<<rr->d_name<<"; QClass:"<<rr->d_class<<"; rrType: "<<rrType.getName()<<endl);
if (rrType == QType::NSEC3PARAM) {
- L<<Logger::Notice<<msgPrefix<<"Deleting NSEC3PARAM from zone, resetting ordernames."<<endl;
+ g_log<<Logger::Notice<<msgPrefix<<"Deleting NSEC3PARAM from zone, resetting ordernames."<<endl;
if (rr->d_class == QClass::ANY)
d_dk.unsetNSEC3PARAM(rr->d_name);
else if (rr->d_class == QClass::NONE) {
if (recordsToDelete.size()) {
di->backend->replaceRRSet(di->id, rr->d_name, rrType, rrset);
- L<<Logger::Notice<<msgPrefix<<"Deleting record "<<rr->d_name<<"|"<<rrType.getName()<<endl;
+ g_log<<Logger::Notice<<msgPrefix<<"Deleting record "<<rr->d_name<<"|"<<rrType.getName()<<endl;
changedRecords += recordsToDelete.size();
}
}
} else { // if (recordsToDelete.size())
- L<<Logger::Notice<<msgPrefix<<"Deletion for record "<<rr->d_name<<"|"<<rrType.getName()<<" requested, but not found."<<endl;
+ g_log<<Logger::Notice<<msgPrefix<<"Deletion for record "<<rr->d_name<<"|"<<rrType.getName()<<" requested, but not found."<<endl;
}
} // (End of delete block d_class == ANY || d_class == NONE
//Insert and delete ENT's
if (insnonterm.size() > 0 || delnonterm.size() > 0) {
- DLOG(L<<msgPrefix<<"Updating ENT records - "<<insnonterm.size()<<"|"<<delnonterm.size()<<endl);
+ DLOG(g_log<<msgPrefix<<"Updating ENT records - "<<insnonterm.size()<<"|"<<delnonterm.size()<<endl);
di->backend->updateEmptyNonTerminals(di->id, insnonterm, delnonterm, false);
for (const auto &i: insnonterm) {
string hashed;
B.getDomainMetadata(p->qdomain, "FORWARD-DNSUPDATE", forward);
if (forward.size() == 0 && ! ::arg().mustDo("forward-dnsupdate")) {
- L<<Logger::Notice<<msgPrefix<<"Not configured to forward to master, returning Refused."<<endl;
+ g_log<<Logger::Notice<<msgPrefix<<"Not configured to forward to master, returning Refused."<<endl;
return RCode::Refused;
}
for(vector<string>::const_iterator master=di->masters.begin(); master != di->masters.end(); master++) {
- L<<Logger::Notice<<msgPrefix<<"Forwarding packet to master "<<*master<<endl;
+ g_log<<Logger::Notice<<msgPrefix<<"Forwarding packet to master "<<*master<<endl;
ComboAddress remote;
try {
remote = ComboAddress(*master, 53);
}
catch (...) {
- L<<Logger::Error<<msgPrefix<<"Failed to parse "<<*master<<" as valid remote."<<endl;
+ g_log<<Logger::Error<<msgPrefix<<"Failed to parse "<<*master<<" as valid remote."<<endl;
continue;
}
}
int sock = makeQuerySocket(local, false); // create TCP socket. RFC2136 section 6.2 seems to be ok with this.
if(sock < 0) {
- L<<Logger::Error<<msgPrefix<<"Error creating socket: "<<stringerror()<<endl;
+ g_log<<Logger::Error<<msgPrefix<<"Error creating socket: "<<stringerror()<<endl;
continue;
}
if( connect(sock, (struct sockaddr*)&remote, remote.getSocklen()) < 0 ) {
- L<<Logger::Error<<msgPrefix<<"Failed to connect to "<<remote.toStringWithPort()<<": "<<stringerror()<<endl;
+ g_log<<Logger::Error<<msgPrefix<<"Failed to connect to "<<remote.toStringWithPort()<<": "<<stringerror()<<endl;
try {
closesocket(sock);
}
catch(const PDNSException& e) {
- L<<Logger::Error<<"Error closing master forwarding socket after connect() failed: "<<e.reason<<endl;
+ g_log<<Logger::Error<<"Error closing master forwarding socket after connect() failed: "<<e.reason<<endl;
}
continue;
}
string buffer((const char*)&len, 2);
buffer.append(forwardPacket.getString());
if(write(sock, buffer.c_str(), buffer.length()) < 0) {
- L<<Logger::Error<<msgPrefix<<"Unable to forward update message to "<<remote.toStringWithPort()<<", error:"<<stringerror()<<endl;
+ g_log<<Logger::Error<<msgPrefix<<"Unable to forward update message to "<<remote.toStringWithPort()<<", error:"<<stringerror()<<endl;
try {
closesocket(sock);
}
catch(const PDNSException& e) {
- L<<Logger::Error<<"Error closing master forwarding socket after write() failed: "<<e.reason<<endl;
+ g_log<<Logger::Error<<"Error closing master forwarding socket after write() failed: "<<e.reason<<endl;
}
continue;
}
int res = waitForData(sock, 10, 0);
if (!res) {
- L<<Logger::Error<<msgPrefix<<"Timeout waiting for reply from master at "<<remote.toStringWithPort()<<endl;
+ g_log<<Logger::Error<<msgPrefix<<"Timeout waiting for reply from master at "<<remote.toStringWithPort()<<endl;
try {
closesocket(sock);
}
catch(const PDNSException& e) {
- L<<Logger::Error<<"Error closing master forwarding socket after a timeout occured: "<<e.reason<<endl;
+ g_log<<Logger::Error<<"Error closing master forwarding socket after a timeout occured: "<<e.reason<<endl;
}
continue;
}
if (res < 0) {
- L<<Logger::Error<<msgPrefix<<"Error waiting for answer from master at "<<remote.toStringWithPort()<<", error:"<<stringerror()<<endl;
+ g_log<<Logger::Error<<msgPrefix<<"Error waiting for answer from master at "<<remote.toStringWithPort()<<", error:"<<stringerror()<<endl;
try {
closesocket(sock);
}
catch(const PDNSException& e) {
- L<<Logger::Error<<"Error closing master forwarding socket after an error occured: "<<e.reason<<endl;
+ g_log<<Logger::Error<<"Error closing master forwarding socket after an error occured: "<<e.reason<<endl;
}
continue;
}
ssize_t recvRes;
recvRes = recv(sock, &lenBuf, sizeof(lenBuf), 0);
if (recvRes < 0 || static_cast<size_t>(recvRes) < sizeof(lenBuf)) {
- L<<Logger::Error<<msgPrefix<<"Could not receive data (length) from master at "<<remote.toStringWithPort()<<", error:"<<stringerror()<<endl;
+ g_log<<Logger::Error<<msgPrefix<<"Could not receive data (length) from master at "<<remote.toStringWithPort()<<", error:"<<stringerror()<<endl;
try {
closesocket(sock);
}
catch(const PDNSException& e) {
- L<<Logger::Error<<"Error closing master forwarding socket after recv() failed: "<<e.reason<<endl;
+ g_log<<Logger::Error<<"Error closing master forwarding socket after recv() failed: "<<e.reason<<endl;
}
continue;
}
buffer.resize(packetLen);
recvRes = recv(sock, &buffer.at(0), packetLen, 0);
if (recvRes < 0) {
- L<<Logger::Error<<msgPrefix<<"Could not receive data (dnspacket) from master at "<<remote.toStringWithPort()<<", error:"<<stringerror()<<endl;
+ g_log<<Logger::Error<<msgPrefix<<"Could not receive data (dnspacket) from master at "<<remote.toStringWithPort()<<", error:"<<stringerror()<<endl;
try {
closesocket(sock);
}
catch(const PDNSException& e) {
- L<<Logger::Error<<"Error closing master forwarding socket after recv() failed: "<<e.reason<<endl;
+ g_log<<Logger::Error<<"Error closing master forwarding socket after recv() failed: "<<e.reason<<endl;
}
continue;
}
closesocket(sock);
}
catch(const PDNSException& e) {
- L<<Logger::Error<<"Error closing master forwarding socket: "<<e.reason<<endl;
+ g_log<<Logger::Error<<"Error closing master forwarding socket: "<<e.reason<<endl;
}
try {
MOADNSParser mdp(false, buffer.data(), static_cast<unsigned int>(recvRes));
- L<<Logger::Info<<msgPrefix<<"Forward update message to "<<remote.toStringWithPort()<<", result was RCode "<<mdp.d_header.rcode<<endl;
+ g_log<<Logger::Info<<msgPrefix<<"Forward update message to "<<remote.toStringWithPort()<<", result was RCode "<<mdp.d_header.rcode<<endl;
return mdp.d_header.rcode;
}
catch (...) {
- L<<Logger::Error<<msgPrefix<<"Failed to parse response packet from master at "<<remote.toStringWithPort()<<endl;
+ g_log<<Logger::Error<<msgPrefix<<"Failed to parse response packet from master at "<<remote.toStringWithPort()<<endl;
continue;
}
}
- L<<Logger::Error<<msgPrefix<<"Failed to forward packet to master(s). Returning ServFail."<<endl;
+ g_log<<Logger::Error<<msgPrefix<<"Failed to forward packet to master(s). Returning ServFail."<<endl;
return RCode::ServFail;
}
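
Note (annotation, not part of the patch): the forwarding code above speaks DNS over TCP, which per RFC 1035 section 4.2.2 prefixes every message with a two-byte length in network byte order. A minimal sketch of that framing follows; sendDNSMessageTCP() is a hypothetical helper, not a PowerDNS function.

#include <arpa/inet.h>
#include <unistd.h>
#include <cstdint>
#include <string>

static bool sendDNSMessageTCP(int sock, const std::string& packet)
{
  uint16_t len = htons(static_cast<uint16_t>(packet.size()));   // two-byte length prefix, big-endian
  std::string wire(reinterpret_cast<const char*>(&len), 2);
  wire.append(packet);

  size_t pos = 0;
  while (pos < wire.size()) {                                    // cope with partial writes
    ssize_t ret = write(sock, wire.data() + pos, wire.size() - pos);
    if (ret <= 0)
      return false;
    pos += static_cast<size_t>(ret);
  }
  return true;
}
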
return RCode::Refused;
string msgPrefix="UPDATE (" + itoa(p->d.id) + ") from " + p->getRemote().toString() + " for " + p->qdomain.toLogString() + ": ";
- L<<Logger::Info<<msgPrefix<<"Processing started."<<endl;
+ g_log<<Logger::Info<<msgPrefix<<"Processing started."<<endl;
// if there is policy, we delegate all checks to it
if (this->d_update_policy_lua == NULL) {
ng.addMask(*i);
if ( ! ng.match(&p->d_remote)) {
- L<<Logger::Error<<msgPrefix<<"Remote not listed in allow-dnsupdate-from or domainmetadata. Sending REFUSED"<<endl;
+ g_log<<Logger::Error<<msgPrefix<<"Remote not listed in allow-dnsupdate-from or domainmetadata. Sending REFUSED"<<endl;
return RCode::Refused;
}
DNSName inputkey;
string message;
if (! p->getTSIGDetails(&trc, &inputkey)) {
- L<<Logger::Error<<msgPrefix<<"TSIG key required, but packet does not contain key. Sending REFUSED"<<endl;
+ g_log<<Logger::Error<<msgPrefix<<"TSIG key required, but packet does not contain key. Sending REFUSED"<<endl;
return RCode::Refused;
}
}
if (!validKey) {
- L<<Logger::Error<<msgPrefix<<"TSIG key ("<<inputkey<<") required, but no matching key found in domainmetadata, tried "<<tsigKeys.size()<<". Sending REFUSED"<<endl;
+ g_log<<Logger::Error<<msgPrefix<<"TSIG key ("<<inputkey<<") required, but no matching key found in domainmetadata, tried "<<tsigKeys.size()<<". Sending REFUSED"<<endl;
return RCode::Refused;
}
}
if (tsigKeys.size() == 0 && p->d_havetsig)
- L<<Logger::Warning<<msgPrefix<<"TSIG is provided, but domain is not secured with TSIG. Processing continues"<<endl;
+ g_log<<Logger::Warning<<msgPrefix<<"TSIG is provided, but domain is not secured with TSIG. Processing continues"<<endl;
}
// variable names during the use of our MOADNSParser.
MOADNSParser mdp(false, p->getString());
if (mdp.d_header.qdcount != 1) {
- L<<Logger::Warning<<msgPrefix<<"Zone Count is not 1, sending FormErr"<<endl;
+ g_log<<Logger::Warning<<msgPrefix<<"Zone Count is not 1, sending FormErr"<<endl;
return RCode::FormErr;
}
if (p->qtype.getCode() != QType::SOA) { // RFC2136 2.3 - ZTYPE must be SOA
- L<<Logger::Warning<<msgPrefix<<"Query ZTYPE is not SOA, sending FormErr"<<endl;
+ g_log<<Logger::Warning<<msgPrefix<<"Query ZTYPE is not SOA, sending FormErr"<<endl;
return RCode::FormErr;
}
if (p->qclass != QClass::IN) {
- L<<Logger::Warning<<msgPrefix<<"Class is not IN, sending NotAuth"<<endl;
+ g_log<<Logger::Warning<<msgPrefix<<"Class is not IN, sending NotAuth"<<endl;
return RCode::NotAuth;
}
DomainInfo di;
di.backend=0;
if(!B.getDomainInfo(p->qdomain, di) || !di.backend) {
- L<<Logger::Error<<msgPrefix<<"Can't determine backend for domain '"<<p->qdomain<<"' (or backend does not support DNS update operation)"<<endl;
+ g_log<<Logger::Error<<msgPrefix<<"Can't determine backend for domain '"<<p->qdomain<<"' (or backend does not support DNS update operation)"<<endl;
return RCode::NotAuth;
}
continue;
if (!rr->d_name.isPartOf(di.zone)) {
- L<<Logger::Error<<msgPrefix<<"Received update/record out of zone, sending NotZone."<<endl;
+ g_log<<Logger::Error<<msgPrefix<<"Received update/record out of zone, sending NotZone."<<endl;
return RCode::NotZone;
}
}
Lock l(&s_rfc2136lock); //TODO: i think this lock can be per zone, not for everything
- L<<Logger::Info<<msgPrefix<<"starting transaction."<<endl;
+ g_log<<Logger::Info<<msgPrefix<<"starting transaction."<<endl;
if (!di.backend->startTransaction(p->qdomain, -1)) { // Not giving the domain_id means that we do not delete the existing records.
- L<<Logger::Error<<msgPrefix<<"Backend for domain "<<p->qdomain<<" does not support transaction. Can't do Update packet."<<endl;
+ g_log<<Logger::Error<<msgPrefix<<"Backend for domain "<<p->qdomain<<" does not support transaction. Can't do Update packet."<<endl;
return RCode::NotImp;
}
if (rr->d_place == DNSResourceRecord::ANSWER) {
int res = checkUpdatePrerequisites(rr, &di);
if (res>0) {
- L<<Logger::Error<<msgPrefix<<"Failed PreRequisites check, returning "<<RCode::to_s(res)<<endl;
+ g_log<<Logger::Error<<msgPrefix<<"Failed PreRequisites check, returning "<<RCode::to_s(res)<<endl;
di.backend->abortTransaction();
return res;
}
}
}
if (matchRR != foundRR || foundRR != vec->size()) {
- L<<Logger::Error<<msgPrefix<<"Failed PreRequisites check (RRs differ), returning NXRRSet"<<endl;
+ g_log<<Logger::Error<<msgPrefix<<"Failed PreRequisites check (RRs differ), returning NXRRSet"<<endl;
di.backend->abortTransaction();
return RCode::NXRRSet;
}
if (rr->d_place == DNSResourceRecord::AUTHORITY) {
int res = checkUpdatePrescan(rr);
if (res>0) {
- L<<Logger::Error<<msgPrefix<<"Failed prescan check, returning "<<res<<endl;
+ g_log<<Logger::Error<<msgPrefix<<"Failed prescan check, returning "<<res<<endl;
di.backend->abortTransaction();
return res;
}
}
for (auto const &n : cn) {
if (nocn.count(n) > 0) {
- L<<Logger::Error<<msgPrefix<<"Refusing update, found CNAME and non-CNAME addition"<<endl;
+ g_log<<Logger::Error<<msgPrefix<<"Refusing update, found CNAME and non-CNAME addition"<<endl;
di.backend->abortTransaction();
return RCode::FormErr;
}
/* see if it's permitted by policy */
if (this->d_update_policy_lua != NULL) {
if (this->d_update_policy_lua->updatePolicy(rr->d_name, QType(rr->d_type), di.zone, p) == false) {
- L<<Logger::Warning<<msgPrefix<<"Refusing update for " << rr->d_name << "/" << QType(rr->d_type).getName() << ": Not permitted by policy"<<endl;
+ g_log<<Logger::Warning<<msgPrefix<<"Refusing update for " << rr->d_name << "/" << QType(rr->d_type).getName() << ": Not permitted by policy"<<endl;
continue;
} else {
- L<<Logger::Debug<<msgPrefix<<"Accepting update for " << rr->d_name << "/" << QType(rr->d_type).getName() << ": Permitted by policy"<<endl;
+ g_log<<Logger::Debug<<msgPrefix<<"Accepting update for " << rr->d_name << "/" << QType(rr->d_type).getName() << ": Permitted by policy"<<endl;
}
}
di.backend->lookup(QType(QType::ANY), rr->d_name);
while (di.backend->get(rec)) {
if (rec.qtype != QType::CNAME && rec.qtype != QType::RRSIG) {
- L<<Logger::Warning<<msgPrefix<<"Refusing update for " << rr->d_name << "/" << QType(rr->d_type).getName() << ": Data other than CNAME exists for the same name"<<endl;
+ g_log<<Logger::Warning<<msgPrefix<<"Refusing update for " << rr->d_name << "/" << QType(rr->d_type).getName() << ": Data other than CNAME exists for the same name"<<endl;
di.backend->abortTransaction();
return RCode::Refused;
}
di.backend->lookup(QType(QType::CNAME), rr->d_name);
while (di.backend->get(rec)) {
if (rec.qtype == QType::CNAME && rr->d_type != QType::RRSIG) {
- L<<Logger::Warning<<msgPrefix<<"Refusing update for " << rr->d_name << "/" << QType(rr->d_type).getName() << ": CNAME exists for the same name"<<endl;
+ g_log<<Logger::Warning<<msgPrefix<<"Refusing update for " << rr->d_name << "/" << QType(rr->d_type).getName() << ": CNAME exists for the same name"<<endl;
di.backend->abortTransaction();
return RCode::Refused;
}
if (changedRecords > 0) {
if (!di.backend->commitTransaction()) {
- L<<Logger::Error<<msgPrefix<<"Failed to commit updates!"<<endl;
+ g_log<<Logger::Error<<msgPrefix<<"Failed to commit updates!"<<endl;
return RCode::ServFail;
}
}
}
- L<<Logger::Info<<msgPrefix<<"Update completed, "<<changedRecords<<" changed records committed."<<endl;
+ g_log<<Logger::Info<<msgPrefix<<"Update completed, "<<changedRecords<<" changed records committed."<<endl;
} else {
//No change, no commit, we perform abort() because some backends might like this more.
- L<<Logger::Info<<msgPrefix<<"Update completed, 0 changes, rolling back."<<endl;
+ g_log<<Logger::Info<<msgPrefix<<"Update completed, 0 changes, rolling back."<<endl;
di.backend->abortTransaction();
}
return RCode::NoError; //rfc 2136 3.4.2.5
}
catch (SSqlException &e) {
- L<<Logger::Error<<msgPrefix<<"Caught SSqlException: "<<e.txtReason()<<"; Sending ServFail!"<<endl;
+ g_log<<Logger::Error<<msgPrefix<<"Caught SSqlException: "<<e.txtReason()<<"; Sending ServFail!"<<endl;
di.backend->abortTransaction();
return RCode::ServFail;
}
catch (DBException &e) {
- L<<Logger::Error<<msgPrefix<<"Caught DBException: "<<e.reason<<"; Sending ServFail!"<<endl;
+ g_log<<Logger::Error<<msgPrefix<<"Caught DBException: "<<e.reason<<"; Sending ServFail!"<<endl;
di.backend->abortTransaction();
return RCode::ServFail;
}
catch (PDNSException &e) {
- L<<Logger::Error<<msgPrefix<<"Caught PDNSException: "<<e.reason<<"; Sending ServFail!"<<endl;
+ g_log<<Logger::Error<<msgPrefix<<"Caught PDNSException: "<<e.reason<<"; Sending ServFail!"<<endl;
di.backend->abortTransaction();
return RCode::ServFail;
}
catch(std::exception &e) {
- L<<Logger::Error<<msgPrefix<<"Caught std:exception: "<<e.what()<<"; Sending ServFail!"<<endl;
+ g_log<<Logger::Error<<msgPrefix<<"Caught std:exception: "<<e.what()<<"; Sending ServFail!"<<endl;
di.backend->abortTransaction();
return RCode::ServFail;
}
catch (...) {
- L<<Logger::Error<<msgPrefix<<"Caught unknown exception when performing update. Sending ServFail!"<<endl;
+ g_log<<Logger::Error<<msgPrefix<<"Caught unknown exception when performing update. Sending ServFail!"<<endl;
di.backend->abortTransaction();
return RCode::ServFail;
}
uint32_t oldSerial = sd.serial;
if (oldSerial == 0) { // using Autoserial, leave the serial alone.
- L<<Logger::Notice<<msgPrefix<<"AutoSerial in use in domain \""<<di->zone.toLogString()<<"\", not updating SOA serial."<<endl;
+ g_log<<Logger::Notice<<msgPrefix<<"AutoSerial in use in domain \""<<di->zone.toLogString()<<"\", not updating SOA serial."<<endl;
return;
}
string soaEditSetting;
d_dk.getSoaEdit(di->zone, soaEditSetting);
if (soaEditSetting.empty()) {
- L<<Logger::Error<<msgPrefix<<"Using "<<soaEdit2136<<" for SOA-EDIT-DNSUPDATE increase on DNS update, but SOA-EDIT is not set for domain \""<< di->zone.toLogString() <<"\". Using DEFAULT for SOA-EDIT-DNSUPDATE"<<endl;
+ g_log<<Logger::Error<<msgPrefix<<"Using "<<soaEdit2136<<" for SOA-EDIT-DNSUPDATE increase on DNS update, but SOA-EDIT is not set for domain \""<< di->zone.toLogString() <<"\". Using DEFAULT for SOA-EDIT-DNSUPDATE"<<endl;
soaEdit2136 = "DEFAULT";
} else
soaEdit = soaEditSetting;
DNSResourceRecord rr;
if (makeIncreasedSOARecord(sd, soaEdit2136, soaEdit, rr)) {
di->backend->replaceRRSet(di->id, rr.qname, rr.qtype, vector<DNSResourceRecord>(1, rr));
- L << Logger::Notice << msgPrefix << "Increasing SOA serial (" << oldSerial << " -> " << sd.serial << ")" << endl;
+ g_log << Logger::Notice << msgPrefix << "Increasing SOA serial (" << oldSerial << " -> " << sd.serial << ")" << endl;
//Correct ordername + auth flag
if (haveNSEC3) {
else if(!crcTarget.empty() && !crcTarget.isRoot() && crcTarget.getRawLabel(crcTarget.countLabels() - 1).compare(0, rpzPrefix.length(), rpzPrefix) == 0) {
/* this is very likely a higher format number or a configuration error,
let's just ignore it. */
- L<<Logger::Info<<"Discarding unsupported RPZ entry "<<crcTarget<<" for "<<dr.d_name<<endl;
+ g_log<<Logger::Info<<"Discarding unsupported RPZ entry "<<crcTarget<<" for "<<dr.d_name<<endl;
return;
}
else {
shared_ptr<SOARecordContent> loadRPZFromServer(const ComboAddress& master, const DNSName& zoneName, std::shared_ptr<DNSFilterEngine::Zone> zone, boost::optional<DNSFilterEngine::Policy> defpol, uint32_t maxTTL, const TSIGTriplet& tt, size_t maxReceivedBytes, const ComboAddress& localAddress, uint16_t axfrTimeout)
{
- L<<Logger::Warning<<"Loading RPZ zone '"<<zoneName<<"' from "<<master.toStringWithPort()<<endl;
+ g_log<<Logger::Warning<<"Loading RPZ zone '"<<zoneName<<"' from "<<master.toStringWithPort()<<endl;
if(!tt.name.empty())
- L<<Logger::Warning<<"With TSIG key '"<<tt.name<<"' of algorithm '"<<tt.algo<<"'"<<endl;
+ g_log<<Logger::Warning<<"With TSIG key '"<<tt.name<<"' of algorithm '"<<tt.algo<<"'"<<endl;
ComboAddress local(localAddress);
if (local == ComboAddress())
throw PDNSException("Total AXFR time exceeded!");
}
if(last != time(0)) {
- L<<Logger::Info<<"Loaded & indexed "<<nrecords<<" policy records so far"<<endl;
+ g_log<<Logger::Info<<"Loaded & indexed "<<nrecords<<" policy records so far"<<endl;
last=time(0);
}
}
- L<<Logger::Info<<"Done: "<<nrecords<<" policy records active, SOA: "<<sr->getZoneRepresentation()<<endl;
+ g_log<<Logger::Info<<"Done: "<<nrecords<<" policy records active, SOA: "<<sr->getZoneRepresentation()<<endl;
return sr;
}
setRPZZoneNewState(polName, sr->d_st.serial, zone->size(), true);
}
catch(const std::exception& e) {
- theL()<<Logger::Warning<<"Unable to load RPZ zone '"<<zoneName<<"' from '"<<master<<"': '"<<e.what()<<"'. (Will try again in "<<refresh<<" seconds...)"<<endl;
+ g_log<<Logger::Warning<<"Unable to load RPZ zone '"<<zoneName<<"' from '"<<master<<"': '"<<e.what()<<"'. (Will try again in "<<refresh<<" seconds...)"<<endl;
incRPZFailedTransfers(polName);
}
catch(const PDNSException& e) {
- theL()<<Logger::Warning<<"Unable to load RPZ zone '"<<zoneName<<"' from '"<<master<<"': '"<<e.reason<<"'. (Will try again in "<<refresh<<" seconds...)"<<endl;
+ g_log<<Logger::Warning<<"Unable to load RPZ zone '"<<zoneName<<"' from '"<<master<<"': '"<<e.reason<<"'. (Will try again in "<<refresh<<" seconds...)"<<endl;
incRPZFailedTransfers(polName);
}
sleep(refresh);
- L<<Logger::Info<<"Getting IXFR deltas for "<<zoneName<<" from "<<master.toStringWithPort()<<", our serial: "<<getRR<SOARecordContent>(dr)->d_st.serial<<endl;
+ g_log<<Logger::Info<<"Getting IXFR deltas for "<<zoneName<<" from "<<master.toStringWithPort()<<", our serial: "<<getRR<SOARecordContent>(dr)->d_st.serial<<endl;
vector<pair<vector<DNSRecord>, vector<DNSRecord> > > deltas;
ComboAddress local(localAddress);
try {
deltas = getIXFRDeltas(master, zoneName, dr, tt, &local, maxReceivedBytes);
} catch(std::runtime_error& e ){
- L<<Logger::Warning<<e.what()<<endl;
+ g_log<<Logger::Warning<<e.what()<<endl;
incRPZFailedTransfers(polName);
continue;
}
if(deltas.empty())
continue;
- L<<Logger::Info<<"Processing "<<deltas.size()<<" delta"<<addS(deltas)<<" for RPZ "<<zoneName<<endl;
+ g_log<<Logger::Info<<"Processing "<<deltas.size()<<" delta"<<addS(deltas)<<" for RPZ "<<zoneName<<endl;
auto luaconfsLocal = g_luaconfs.getLocal();
const std::shared_ptr<DNSFilterEngine::Zone> oldZone = luaconfsLocal->dfe.getZone(zoneIdx);
const auto& remove = delta.first;
const auto& add = delta.second;
if(remove.empty()) {
- L<<Logger::Warning<<"IXFR update is a whole new zone"<<endl;
+ g_log<<Logger::Warning<<"IXFR update is a whole new zone"<<endl;
newZone->clear();
fullUpdate = true;
}
// cout<<"Got good removal of SOA serial "<<oldsr->d_st.serial<<endl;
}
else
- L<<Logger::Error<<"GOT WRONG SOA SERIAL REMOVAL, SHOULD TRIGGER WHOLE RELOAD"<<endl;
+ g_log<<Logger::Error<<"GOT WRONG SOA SERIAL REMOVAL, SHOULD TRIGGER WHOLE RELOAD"<<endl;
}
else {
totremove++;
- L<<(g_logRPZChanges ? Logger::Info : Logger::Debug)<<"Had removal of "<<rr.d_name<<" from RPZ zone "<<zoneName<<endl;
+ g_log<<(g_logRPZChanges ? Logger::Info : Logger::Debug)<<"Had removal of "<<rr.d_name<<" from RPZ zone "<<zoneName<<endl;
RPZRecordToPolicy(rr, newZone, false, defpol, maxTTL);
}
}
continue;
if(rr.d_type == QType::SOA) {
auto newsr = getRR<SOARecordContent>(rr);
- // L<<Logger::Info<<"New SOA serial for "<<zoneName<<": "<<newsr->d_st.serial<<endl;
+ // g_log<<Logger::Info<<"New SOA serial for "<<zoneName<<": "<<newsr->d_st.serial<<endl;
if (newsr) {
sr = newsr;
}
}
else {
totadd++;
- L<<(g_logRPZChanges ? Logger::Info : Logger::Debug)<<"Had addition of "<<rr.d_name<<" to RPZ zone "<<zoneName<<endl;
+ g_log<<(g_logRPZChanges ? Logger::Info : Logger::Debug)<<"Had addition of "<<rr.d_name<<" to RPZ zone "<<zoneName<<endl;
RPZRecordToPolicy(rr, newZone, true, defpol, maxTTL);
}
}
}
- L<<Logger::Info<<"Had "<<totremove<<" RPZ removal"<<addS(totremove)<<", "<<totadd<<" addition"<<addS(totadd)<<" for "<<zoneName<<" New serial: "<<sr->d_st.serial<<endl;
+ g_log<<Logger::Info<<"Had "<<totremove<<" RPZ removal"<<addS(totremove)<<", "<<totadd<<" addition"<<addS(totadd)<<" for "<<zoneName<<" New serial: "<<sr->d_st.serial<<endl;
newZone->setSerial(sr->d_st.serial);
setRPZZoneNewState(polName, sr->d_st.serial, newZone->size(), fullUpdate);
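
Note (annotation, not part of the patch): each IXFR delta handled above is a (removed records, added records) pair that is applied in order to a copy of the zone, which is then swapped in. A hedged sketch of that idea with a generic container follows; the Record alias and applyDeltas() helper are illustrative assumptions, not the RPZ zone type used in the code.

#include <set>
#include <string>
#include <utility>
#include <vector>

using Record = std::string;   // stand-in for a real DNS record type

static std::set<Record> applyDeltas(std::set<Record> zone,
    const std::vector<std::pair<std::vector<Record>, std::vector<Record>>>& deltas)
{
  for (const auto& delta : deltas) {
    for (const auto& rec : delta.first)    // first member: records removed by this delta
      zone.erase(rec);
    for (const auto& rec : delta.second)   // second member: records added by this delta
      zone.insert(rec);
  }
  return zone;                             // caller installs the result atomically, as the code above does
}
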
else {
string pkgv(PACKAGEVERSION);
if(pkgv.find("0.0.") != 0)
- L<<Logger::Warning<<"Could not retrieve security status update for '" + pkgv + "' on '"+query+"', RCODE = "<< RCode::to_s(res)<<endl;
+ g_log<<Logger::Warning<<"Could not retrieve security status update for '" + pkgv + "' on '"+query+"', RCODE = "<< RCode::to_s(res)<<endl;
else
- L<<Logger::Warning<<"Not validating response for security status update, this is a non-release version."<<endl;
+ g_log<<Logger::Warning<<"Not validating response for security status update, this is a non-release version."<<endl;
}
if(security_status == 1 && first) {
- L<<Logger::Warning << "Polled security status of version "<<PACKAGEVERSION<<" at startup, no known issues reported: " <<g_security_message<<endl;
+ g_log<<Logger::Warning << "Polled security status of version "<<PACKAGEVERSION<<" at startup, no known issues reported: " <<g_security_message<<endl;
}
if(security_status == 2) {
- L<<Logger::Error<<"PowerDNS Security Update Recommended: "<<g_security_message<<endl;
+ g_log<<Logger::Error<<"PowerDNS Security Update Recommended: "<<g_security_message<<endl;
}
else if(security_status == 3) {
- L<<Logger::Error<<"PowerDNS Security Update Mandatory: "<<g_security_message<<endl;
+ g_log<<Logger::Error<<"PowerDNS Security Update Mandatory: "<<g_security_message<<endl;
}
S.set("security-status",security_status);
}
if(state == Bogus) {
- L<<Logger::Error<<"Could not retrieve security status update for '" +pkgv+ "' on '"<<query<<"', DNSSEC validation result was Bogus!"<<endl;
+ g_log<<Logger::Error<<"Could not retrieve security status update for '" +pkgv+ "' on '"<<query<<"', DNSSEC validation result was Bogus!"<<endl;
if(g_security_status == 1) // If we were OK, go to unknown
g_security_status = 0;
return;
}
else {
if(pkgv.find("0.0.") != 0)
- L<<Logger::Warning<<"Could not retrieve security status update for '" +pkgv+ "' on '"<<query<<"', RCODE = "<< RCode::to_s(res)<<endl;
+ g_log<<Logger::Warning<<"Could not retrieve security status update for '" +pkgv+ "' on '"<<query<<"', RCODE = "<< RCode::to_s(res)<<endl;
else
- L<<Logger::Warning<<"Ignoring response for security status update, this is a non-release version."<<endl;
+ g_log<<Logger::Warning<<"Ignoring response for security status update, this is a non-release version."<<endl;
if(g_security_status == 1) // it was ok, now it is unknown
g_security_status = 0;
}
if(g_security_status == 2) {
- L<<Logger::Error<<"PowerDNS Security Update Recommended: "<<g_security_message<<endl;
+ g_log<<Logger::Error<<"PowerDNS Security Update Recommended: "<<g_security_message<<endl;
}
else if(g_security_status == 3) {
- L<<Logger::Error<<"PowerDNS Security Update Mandatory: "<<g_security_message<<endl;
+ g_log<<Logger::Error<<"PowerDNS Security Update Mandatory: "<<g_security_message<<endl;
}
}
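The security-polling fragments above map the polled status to log severity: 1 means no known issues (reported once at startup), 2 means an upgrade is recommended, 3 means an upgrade is mandatory, and a failed or Bogus lookup demotes a previously OK status back to 0 (unknown). A compressed sketch of that mapping, with the function name invented for illustration:

#include <iostream>
#include <string>

// Hypothetical condensation of the security-status reporting shown above.
// Status values follow the convention visible in the diff:
//   0 = unknown, 1 = OK, 2 = upgrade recommended, 3 = upgrade mandatory.
static void reportSecurityStatus(int status, const std::string& message, bool firstPoll)
{
  if (status == 1 && firstPoll)
    std::cerr << "no known issues reported: " << message << std::endl;
  else if (status == 2)
    std::cerr << "Security Update Recommended: " << message << std::endl;
  else if (status == 3)
    std::cerr << "Security Update Mandatory: " << message << std::endl;
}

int main()
{
  reportSecurityStatus(2, "please upgrade to the latest release", false);
}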
if (old_serial < inception)
return inception;
} else if(!kind.empty()) {
- L<<Logger::Warning<<"SOA-EDIT type '"<<kind<<"' for zone "<<zonename<<" is unknown."<<endl;
+ g_log<<Logger::Warning<<"SOA-EDIT type '"<<kind<<"' for zone "<<zonename<<" is unknown."<<endl;
}
return old_serial;
}
}
return new_serial;
} else if(!increaseKind.empty()) {
- L<<Logger::Warning<<"SOA-EDIT-API/DNSUPDATE type '"<<increaseKind<<"' for zone "<<zonename<<" is unknown."<<endl;
+ g_log<<Logger::Warning<<"SOA-EDIT-API/DNSUPDATE type '"<<increaseKind<<"' for zone "<<zonename<<" is unknown."<<endl;
}
return old_serial;
}
return nullptr;
}
catch(...) {
- L<<Logger::Error<<"Unknown exception in signing thread occurred"<<endl;
+ g_log<<Logger::Error<<"Unknown exception in signing thread occurred"<<endl;
return nullptr;
}
}
catch(const PDNSException& pe)
{
- L<<Logger::Error<<"Signing thread died because of PDNSException: "<<pe.reason<<endl;
+ g_log<<Logger::Error<<"Signing thread died because of PDNSException: "<<pe.reason<<endl;
close(fd);
}
catch(const std::exception& e)
{
- L<<Logger::Error<<"Signing thread died because of std::exception: "<<e.what()<<endl;
+ g_log<<Logger::Error<<"Signing thread died because of std::exception: "<<e.what()<<endl;
close(fd);
}
DNSSECKeeper dk (&B); // reuse our UeberBackend copy for DNSSECKeeper
if(!B.getDomainInfo(domain, di) || !di.backend || di.kind != DomainInfo::Slave) { // di.backend and B are mostly identical
- L<<Logger::Error<<"Can't determine backend for domain '"<<domain<<"'"<<endl;
+ g_log<<Logger::Error<<"Can't determine backend for domain '"<<domain<<"'"<<endl;
return;
}
}
}
catch(std::exception& p) {
- L<<Logger::Error<<"Got exception during IXFR: "<<p.what()<<endl;
+ g_log<<Logger::Error<<"Got exception during IXFR: "<<p.what()<<endl;
throw;
}
catch(PDNSException& p) {
- L<<Logger::Error<<"Got exception during IXFR: "<<p.reason<<endl;
+ g_log<<Logger::Error<<"Got exception during IXFR: "<<p.reason<<endl;
throw;
}
}
bool soa_received {false};
while(retriever.getChunk(recs)) {
if(first) {
- L<<Logger::Error<<"AXFR started for '"<<domain<<"'"<<endl;
+ g_log<<Logger::Error<<"AXFR started for '"<<domain<<"'"<<endl;
first=false;
}
continue;
if(!i->qname.isPartOf(domain)) {
- L<<Logger::Error<<"Remote "<<raddr.toStringWithPort()<<" tried to sneak in out-of-zone data '"<<i->qname<<"'|"<<i->qtype.getName()<<" during AXFR of zone '"<<domain<<"', ignoring"<<endl;
+ g_log<<Logger::Error<<"Remote "<<raddr.toStringWithPort()<<" tried to sneak in out-of-zone data '"<<i->qname<<"'|"<<i->qtype.getName()<<" during AXFR of zone '"<<domain<<"', ignoring"<<endl;
continue;
}
for(DNSResourceRecord& rr : out) {
if(!rr.qname.isPartOf(domain)) {
- L<<Logger::Error<<"Lua axfrfilter() filter tried to sneak in out-of-zone data '"<<i->qname<<"'|"<<i->qtype.getName()<<" during AXFR of zone '"<<domain<<"', ignoring"<<endl;
+ g_log<<Logger::Error<<"Lua axfrfilter() filter tried to sneak in out-of-zone data '"<<i->qname<<"'|"<<i->qtype.getName()<<" during AXFR of zone '"<<domain<<"', ignoring"<<endl;
continue;
}
if(!processRecordForZS(domain, firstNSEC3, rr, zs))
}
RemoveSentinel rs(domain, this); // this removes us from d_inprogress when we go out of scope
- L<<Logger::Error<<"Initiating transfer of '"<<domain<<"' from remote '"<<remote<<"'"<<endl;
+ g_log<<Logger::Error<<"Initiating transfer of '"<<domain<<"' from remote '"<<remote<<"'"<<endl;
UeberBackend B; // fresh UeberBackend
DomainInfo di;
DNSSECKeeper dk (&B); // reuse our UeberBackend copy for DNSSECKeeper
if(!B.getDomainInfo(domain, di) || !di.backend || di.kind != DomainInfo::Slave) { // di.backend and B are mostly identical
- L<<Logger::Error<<"Can't determine backend for domain '"<<domain<<"'"<<endl;
+ g_log<<Logger::Error<<"Can't determine backend for domain '"<<domain<<"'"<<endl;
return;
}
ZoneStatus zs;
string tsigsecret64;
if(B.getTSIGKey(tt.name, &tt.algo, &tsigsecret64)) {
if(B64Decode(tsigsecret64, tt.secret)) {
- L<<Logger::Error<<"Unable to Base-64 decode TSIG key '"<<tt.name<<"' for domain '"<<domain<<"' not found"<<endl;
+ g_log<<Logger::Error<<"Unable to Base-64 decode TSIG key '"<<tt.name<<"' for domain '"<<domain<<"' not found"<<endl;
return;
}
} else {
- L<<Logger::Error<<"TSIG key '"<<tt.name<<"' for domain '"<<domain<<"' not found"<<endl;
+ g_log<<Logger::Error<<"TSIG key '"<<tt.name<<"' for domain '"<<domain<<"' not found"<<endl;
return;
}
}
try {
pdl.reset(new AuthLua4());
pdl->loadFile(script);
- L<<Logger::Info<<"Loaded Lua script '"<<script<<"' to edit the incoming AXFR of '"<<domain<<"'"<<endl;
+ g_log<<Logger::Info<<"Loaded Lua script '"<<script<<"' to edit the incoming AXFR of '"<<domain<<"'"<<endl;
}
catch(std::exception& e) {
- L<<Logger::Error<<"Failed to load Lua editing script '"<<script<<"' for incoming AXFR of '"<<domain<<"': "<<e.what()<<endl;
+ g_log<<Logger::Error<<"Failed to load Lua editing script '"<<script<<"' for incoming AXFR of '"<<domain<<"': "<<e.what()<<endl;
return;
}
}
if(B.getDomainMetadata(domain, "AXFR-SOURCE", localaddr) && !localaddr.empty()) {
try {
laddr = ComboAddress(localaddr[0]);
- L<<Logger::Info<<"AXFR source for domain '"<<domain<<"' set to "<<localaddr[0]<<endl;
+ g_log<<Logger::Info<<"AXFR source for domain '"<<domain<<"' set to "<<localaddr[0]<<endl;
}
catch(std::exception& e) {
- L<<Logger::Error<<"Failed to load AXFR source '"<<localaddr[0]<<"' for incoming AXFR of '"<<domain<<"': "<<e.what()<<endl;
+ g_log<<Logger::Error<<"Failed to load AXFR source '"<<localaddr[0]<<"' for incoming AXFR of '"<<domain<<"': "<<e.what()<<endl;
return;
}
} else {
laddr = ComboAddress(::arg()["query-local-address6"]);
} else {
bool isv6 = raddr.sin4.sin_family == AF_INET6;
- L<<Logger::Error<<"Unable to AXFR, destination address is IPv" << (isv6 ? "6" : "4") << ", but query-local-address"<< (isv6 ? "6" : "") << " is unset!"<<endl;
+ g_log<<Logger::Error<<"Unable to AXFR, destination address is IPv" << (isv6 ? "6" : "4") << ", but query-local-address"<< (isv6 ? "6" : "") << " is unset!"<<endl;
return;
}
}
B.getDomainMetadata(domain, "IXFR", meta);
if(!meta.empty() && meta[0]=="1") {
vector<DNSRecord> axfr;
- L<<Logger::Warning<<"Starting IXFR of '"<<domain<<"' from remote "<<raddr.toStringWithPort()<<endl;
+ g_log<<Logger::Warning<<"Starting IXFR of '"<<domain<<"' from remote "<<raddr.toStringWithPort()<<endl;
ixfrSuck(domain, tt, laddr, raddr, pdl, zs, &axfr);
if(!axfr.empty()) {
- L<<Logger::Warning<<"IXFR of '"<<domain<<"' from remote '"<<raddr.toStringWithPort()<<"' turned into an AXFR"<<endl;
+ g_log<<Logger::Warning<<"IXFR of '"<<domain<<"' from remote '"<<raddr.toStringWithPort()<<"' turned into an AXFR"<<endl;
bool firstNSEC3=true;
rrs.reserve(axfr.size());
for(const auto& dr : axfr) {
}
}
else {
- L<<Logger::Warning<<"Done with IXFR of '"<<domain<<"' from remote '"<<remote<<"', got "<<zs.numDeltas<<" delta"<<addS(zs.numDeltas)<<", serial now "<<zs.soa_serial<<endl;
+ g_log<<Logger::Warning<<"Done with IXFR of '"<<domain<<"' from remote '"<<remote<<"', got "<<zs.numDeltas<<" delta"<<addS(zs.numDeltas)<<", serial now "<<zs.soa_serial<<endl;
purgeAuthCaches(domain.toString()+"$");
return;
}
}
if(rrs.empty()) {
- L<<Logger::Warning<<"Starting AXFR of '"<<domain<<"' from remote "<<raddr.toStringWithPort()<<endl;
+ g_log<<Logger::Warning<<"Starting AXFR of '"<<domain<<"' from remote "<<raddr.toStringWithPort()<<endl;
rrs = doAxfr(raddr, domain, tt, laddr, pdl, zs);
- L<<Logger::Warning<<"AXFR of '"<<domain<<"' from remote "<<raddr.toStringWithPort()<<" done"<<endl;
+ g_log<<Logger::Warning<<"AXFR of '"<<domain<<"' from remote "<<raddr.toStringWithPort()<<" done"<<endl;
}
if(zs.isNSEC3) {
if(zs.isDnssecZone) {
if(!zs.isNSEC3)
- L<<Logger::Info<<"Adding NSEC ordering information"<<endl;
+ g_log<<Logger::Info<<"Adding NSEC ordering information"<<endl;
else if(!zs.isNarrow)
- L<<Logger::Info<<"Adding NSEC3 hashed ordering information for '"<<domain<<"'"<<endl;
+ g_log<<Logger::Info<<"Adding NSEC3 hashed ordering information for '"<<domain<<"'"<<endl;
else
- L<<Logger::Info<<"Erasing NSEC3 ordering since we are narrow, only setting 'auth' fields"<<endl;
+ g_log<<Logger::Info<<"Erasing NSEC3 ordering since we are narrow, only setting 'auth' fields"<<endl;
}
transaction=di.backend->startTransaction(domain, zs.domain_id);
- L<<Logger::Error<<"Backend transaction started for '"<<domain<<"' storage"<<endl;
+ g_log<<Logger::Error<<"Backend transaction started for '"<<domain<<"' storage"<<endl;
// update the presigned flag and NSEC3PARAM
if (zs.isDnssecZone) {
}
if(nonterm.size() > maxent) {
- L<<Logger::Error<<"AXFR zone "<<domain<<" has too many empty non terminals."<<endl;
+ g_log<<Logger::Error<<"AXFR zone "<<domain<<" has too many empty non terminals."<<endl;
nonterm.clear();
doent=false;
}
purgeAuthCaches(domain.toString()+"$");
- L<<Logger::Error<<"AXFR done for '"<<domain<<"', zone committed with serial number "<<zs.soa_serial<<endl;
+ g_log<<Logger::Error<<"AXFR done for '"<<domain<<"', zone committed with serial number "<<zs.soa_serial<<endl;
if(::arg().mustDo("slave-renotify"))
notifyDomain(domain);
}
catch(DBException &re) {
- L<<Logger::Error<<"Unable to feed record during incoming AXFR of '" << domain<<"': "<<re.reason<<endl;
+ g_log<<Logger::Error<<"Unable to feed record during incoming AXFR of '" << domain<<"': "<<re.reason<<endl;
if(di.backend && transaction) {
- L<<Logger::Error<<"Aborting possible open transaction for domain '"<<domain<<"' AXFR"<<endl;
+ g_log<<Logger::Error<<"Aborting possible open transaction for domain '"<<domain<<"' AXFR"<<endl;
di.backend->abortTransaction();
}
}
catch(MOADNSException &re) {
- L<<Logger::Error<<"Unable to parse record during incoming AXFR of '"<<domain<<"' (MOADNSException): "<<re.what()<<endl;
+ g_log<<Logger::Error<<"Unable to parse record during incoming AXFR of '"<<domain<<"' (MOADNSException): "<<re.what()<<endl;
if(di.backend && transaction) {
- L<<Logger::Error<<"Aborting possible open transaction for domain '"<<domain<<"' AXFR"<<endl;
+ g_log<<Logger::Error<<"Aborting possible open transaction for domain '"<<domain<<"' AXFR"<<endl;
di.backend->abortTransaction();
}
}
catch(std::exception &re) {
- L<<Logger::Error<<"Unable to parse record during incoming AXFR of '"<<domain<<"' (std::exception): "<<re.what()<<endl;
+ g_log<<Logger::Error<<"Unable to parse record during incoming AXFR of '"<<domain<<"' (std::exception): "<<re.what()<<endl;
if(di.backend && transaction) {
- L<<Logger::Error<<"Aborting possible open transaction for domain '"<<domain<<"' AXFR"<<endl;
+ g_log<<Logger::Error<<"Aborting possible open transaction for domain '"<<domain<<"' AXFR"<<endl;
di.backend->abortTransaction();
}
}
catch(ResolverException &re) {
- L<<Logger::Error<<"Unable to AXFR zone '"<<domain<<"' from remote '"<<remote<<"' (resolver): "<<re.reason<<endl;
+ g_log<<Logger::Error<<"Unable to AXFR zone '"<<domain<<"' from remote '"<<remote<<"' (resolver): "<<re.reason<<endl;
if(di.backend && transaction) {
- L<<Logger::Error<<"Aborting possible open transaction for domain '"<<domain<<"' AXFR"<<endl;
+ g_log<<Logger::Error<<"Aborting possible open transaction for domain '"<<domain<<"' AXFR"<<endl;
di.backend->abortTransaction();
}
}
catch(PDNSException &ae) {
- L<<Logger::Error<<"Unable to AXFR zone '"<<domain<<"' from remote '"<<remote<<"' (PDNSException): "<<ae.reason<<endl;
+ g_log<<Logger::Error<<"Unable to AXFR zone '"<<domain<<"' from remote '"<<remote<<"' (PDNSException): "<<ae.reason<<endl;
if(di.backend && transaction) {
- L<<Logger::Error<<"Aborting possible open transaction for domain '"<<domain<<"' AXFR"<<endl;
+ g_log<<Logger::Error<<"Aborting possible open transaction for domain '"<<domain<<"' AXFR"<<endl;
di.backend->abortTransaction();
}
}
if(dk.getTSIGForAccess(di.zone, sr.master, &dni.tsigkeyname)) {
string secret64;
if(!B->getTSIGKey(dni.tsigkeyname, &dni.tsigalgname, &secret64)) {
- L<<Logger::Error<<"TSIG key '"<<dni.tsigkeyname<<"' for domain '"<<di.zone<<"' not found, can not AXFR."<<endl;
+ g_log<<Logger::Error<<"TSIG key '"<<dni.tsigkeyname<<"' for domain '"<<di.zone<<"' not found, can not AXFR."<<endl;
continue;
}
if (B64Decode(secret64, dni.tsigsecret) == -1) {
- L<<Logger::Error<<"Unable to Base-64 decode TSIG key '"<<dni.tsigkeyname<<"' for domain '"<<di.zone<<"', can not AXFR."<<endl;
+ g_log<<Logger::Error<<"Unable to Base-64 decode TSIG key '"<<dni.tsigkeyname<<"' for domain '"<<di.zone<<"', can not AXFR."<<endl;
continue;
}
}
if(B->getDomainMetadata(di.zone, "AXFR-SOURCE", localaddr) && !localaddr.empty()) {
try {
dni.localaddr = ComboAddress(localaddr[0]);
- L<<Logger::Info<<"Freshness check source (AXFR-SOURCE) for domain '"<<di.zone<<"' set to "<<localaddr[0]<<endl;
+ g_log<<Logger::Info<<"Freshness check source (AXFR-SOURCE) for domain '"<<di.zone<<"' set to "<<localaddr[0]<<endl;
}
catch(std::exception& e) {
- L<<Logger::Error<<"Failed to load freshness check source '"<<localaddr[0]<<"' for '"<<di.zone<<"': "<<e.what()<<endl;
+ g_log<<Logger::Error<<"Failed to load freshness check source '"<<localaddr[0]<<"' for '"<<di.zone<<"': "<<e.what()<<endl;
return;
}
} else {
{
if(d_slaveschanged) {
Lock l(&d_lock);
- L<<Logger::Warning<<"No new unfresh slave domains, "<<d_suckdomains.size()<<" queued for AXFR already, "<<d_inprogress.size()<<" in progress"<<endl;
+ g_log<<Logger::Warning<<"No new unfresh slave domains, "<<d_suckdomains.size()<<" queued for AXFR already, "<<d_inprogress.size()<<" in progress"<<endl;
}
d_slaveschanged = !rdomains.empty();
return;
}
else {
Lock l(&d_lock);
- L<<Logger::Warning<<sdomains.size()<<" slave domain"<<(sdomains.size()>1 ? "s" : "")<<" need"<<
+ g_log<<Logger::Warning<<sdomains.size()<<" slave domain"<<(sdomains.size()>1 ? "s" : "")<<" need"<<
(sdomains.size()>1 ? "" : "s")<<
" checking, "<<d_suckdomains.size()<<" queued for AXFR"<<endl;
}
break;
}
catch(std::exception& e) {
- L<<Logger::Error<<"While checking domain freshness: " << e.what()<<endl;
+ g_log<<Logger::Error<<"While checking domain freshness: " << e.what()<<endl;
}
catch(PDNSException &re) {
- L<<Logger::Error<<"While checking domain freshness: " << re.reason<<endl;
+ g_log<<Logger::Error<<"While checking domain freshness: " << re.reason<<endl;
}
}
- L<<Logger::Warning<<"Received serial number updates for "<<ssr.d_freshness.size()<<" zone"<<addS(ssr.d_freshness.size())<<", had "<<ifl.getTimeouts()<<" timeout"<<addS(ifl.getTimeouts())<<endl;
+ g_log<<Logger::Warning<<"Received serial number updates for "<<ssr.d_freshness.size()<<" zone"<<addS(ssr.d_freshness.size())<<", had "<<ifl.getTimeouts()<<" timeout"<<addS(ifl.getTimeouts())<<endl;
typedef DomainNotificationInfo val_t;
time_t now = time(0);
// Please do not overwrite received DI just to make sure it exists in backend.
if(!di.backend) {
if (!B->getDomainInfo(di.zone, tempdi)) {
- L<<Logger::Warning<<"Ignore domain "<< di.zone<<" since it has been removed from our backend"<<endl;
+ g_log<<Logger::Warning<<"Ignore domain "<< di.zone<<" since it has been removed from our backend"<<endl;
continue;
}
// Backend for di still doesn't exist and this might cause us to
time_t nextCheck = now + std::min(newCount * d_tickinterval, (uint64_t)::arg().asNum("soa-retry-default"));
d_failedSlaveRefresh[di.zone] = {newCount, nextCheck};
if (newCount == 1 || newCount % 10 == 0)
- L<<Logger::Warning<<"Unable to retrieve SOA for "<<di.zone<<", this was the "<<(newCount == 1 ? "first" : std::to_string(newCount) + "th")<<" time."<<endl;
+ g_log<<Logger::Warning<<"Unable to retrieve SOA for "<<di.zone<<", this was the "<<(newCount == 1 ? "first" : std::to_string(newCount) + "th")<<" time."<<endl;
continue;
}
uint32_t theirserial = ssr.d_freshness[di.id].theirSerial, ourserial = di.serial;
if(rfc1982LessThan(theirserial, ourserial) && ourserial != 0 && !::arg().mustDo("axfr-lower-serial")) {
- L<<Logger::Error<<"Domain '"<<di.zone<<"' more recent than master, our serial " << ourserial << " > their serial "<< theirserial << endl;
+ g_log<<Logger::Error<<"Domain '"<<di.zone<<"' more recent than master, our serial " << ourserial << " > their serial "<< theirserial << endl;
di.backend->setFresh(di.id);
}
else if(theirserial == ourserial) {
}
}
if(! maxInception && ! ssr.d_freshness[di.id].theirInception) {
- L<<Logger::Info<<"Domain '"<< di.zone<<"' is fresh (no DNSSEC)"<<endl;
+ g_log<<Logger::Info<<"Domain '"<< di.zone<<"' is fresh (no DNSSEC)"<<endl;
di.backend->setFresh(di.id);
}
else if(maxInception == ssr.d_freshness[di.id].theirInception && maxExpire == ssr.d_freshness[di.id].theirExpire) {
- L<<Logger::Info<<"Domain '"<< di.zone<<"' is fresh and SOA RRSIGs match"<<endl;
+ g_log<<Logger::Info<<"Domain '"<< di.zone<<"' is fresh and SOA RRSIGs match"<<endl;
di.backend->setFresh(di.id);
}
else if(maxExpire >= now && ! ssr.d_freshness[di.id].theirInception ) {
- L<<Logger::Info<<"Domain '"<< di.zone<<"' is fresh, master is no longer signed but (some) signatures are still vallid"<<endl;
+ g_log<<Logger::Info<<"Domain '"<< di.zone<<"' is fresh, master is no longer signed but (some) signatures are still vallid"<<endl;
di.backend->setFresh(di.id);
}
else if(maxInception && ! ssr.d_freshness[di.id].theirInception ) {
- L<<Logger::Warning<<"Domain '"<< di.zone<<"' is stale, master is no longer signed and all signatures have expired"<<endl;
+ g_log<<Logger::Warning<<"Domain '"<< di.zone<<"' is stale, master is no longer signed and all signatures have expired"<<endl;
addSuckRequest(di.zone, *di.masters.begin());
}
else if(dk.doesDNSSEC() && ! maxInception && ssr.d_freshness[di.id].theirInception) {
- L<<Logger::Warning<<"Domain '"<< di.zone<<"' is stale, master has signed"<<endl;
+ g_log<<Logger::Warning<<"Domain '"<< di.zone<<"' is stale, master has signed"<<endl;
addSuckRequest(di.zone, *di.masters.begin());
}
else {
- L<<Logger::Warning<<"Domain '"<< di.zone<<"' is fresh, but RRSIGs differ, so DNSSEC is stale"<<endl;
+ g_log<<Logger::Warning<<"Domain '"<< di.zone<<"' is fresh, but RRSIGs differ, so DNSSEC is stale"<<endl;
addSuckRequest(di.zone, *di.masters.begin());
}
}
else {
- L<<Logger::Warning<<"Domain '"<< di.zone<<"' is stale, master serial "<<theirserial<<", our serial "<< ourserial <<endl;
+ g_log<<Logger::Warning<<"Domain '"<< di.zone<<"' is stale, master serial "<<theirserial<<", our serial "<< ourserial <<endl;
addSuckRequest(di.zone, *di.masters.begin());
}
}
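The freshness check above compares SOA serials with rfc1982LessThan(), so serial numbers that wrap around the 32-bit space still order correctly (RFC 1982 serial arithmetic). A minimal reimplementation for illustration, assuming the usual semantics that a is older than b when b is ahead of a by less than 2^31:

#include <cstdint>
#include <iostream>

// Sketch of RFC 1982 serial comparison as used by the freshness check above.
// This is a stand-in for pdns' rfc1982LessThan, written out for illustration:
// a < b iff a != b and the unsigned forward distance from a to b is below 2^31.
static bool serialLessThan(uint32_t a, uint32_t b)
{
  return a != b && (b - a) < (1u << 31);
}

int main()
{
  std::cout << serialLessThan(5, 10) << std::endl;          // 1: 10 is newer
  std::cout << serialLessThan(4294967290u, 3) << std::endl; // 1: serial 3 wrapped past 2^32-1, still newer
  std::cout << serialLessThan(10, 5) << std::endl;          // 0: 5 is older
}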
SSqlStatement* execute() {
prepareStatement();
if (d_dolog)
- L<<Logger::Warning<< "Query: " << d_query << endl;
+ g_log<<Logger::Warning<< "Query: " << d_query << endl;
int attempts = d_db->inTransaction(); // try only once
while(attempts < 2 && (d_rc = sqlite3_step(d_stmt)) == SQLITE_BUSY) attempts++;
throw SSqlException(string("Unable to compile SQLite statement : '")+d_query+"': "+sqlite3_errmsg(d_db->db()));
}
if (pTail && strlen(pTail)>0)
- L<<Logger::Warning<<"Sqlite3 command partially processed. Unprocessed part: "<<pTail<<endl;
+ g_log<<Logger::Warning<<"Sqlite3 command partially processed. Unprocessed part: "<<pTail<<endl;
d_prepared = true;
}
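The execute() fragment above steps the prepared statement and, when no transaction is open, retries once if SQLite reports SQLITE_BUSY; inside a transaction it gives up after the first attempt, presumably because the lock will not clear until the transaction is rolled back. A sketch of that pattern against the plain SQLite C API (function and parameter names here are illustrative):

#include <sqlite3.h>

// Sketch of the "retry sqlite3_step() once on SQLITE_BUSY" pattern used above.
// 'inTransaction' mirrors d_db->inTransaction(): with a transaction open we
// make a single attempt and surface SQLITE_BUSY to the caller.
static int stepWithBusyRetry(sqlite3_stmt* stmt, bool inTransaction)
{
  int attempts = inTransaction ? 1 : 0; // inside a transaction: try only once
  int rc = SQLITE_BUSY;
  while (attempts < 2 && (rc = sqlite3_step(stmt)) == SQLITE_BUSY)
    attempts++;
  return rc;
}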
bool resolversDefined()
{
if (s_resolversForStub.empty()) {
- L<<Logger::Warning<<"No upstream resolvers configured, stub resolving (including secpoll and ALIAS) impossible."<<endl;
+ g_log<<Logger::Warning<<"No upstream resolvers configured, stub resolving (including secpoll and ALIAS) impossible."<<endl;
return false;
}
return true;
for (const auto& server : s_resolversForStub) {
msg += server.toString() + ", ";
}
- L<<Logger::Debug<<msg.substr(0, msg.length() - 2)<<endl;
+ g_log<<Logger::Debug<<msg.substr(0, msg.length() - 2)<<endl;
for(const ComboAddress& dest : s_resolversForStub) {
Socket sock(dest.sin4.sin_family, SOCK_DGRAM);
ret.push_back(zrr);
}
}
- L<<Logger::Debug<<"Question got answered by "<<dest.toString()<<endl;
+ g_log<<Logger::Debug<<"Question got answered by "<<dest.toString()<<endl;
return mdp.d_header.rcode;
}
return RCode::ServFail;
bool SyncRes::s_rootNXTrust;
bool SyncRes::s_noEDNS;
-#define LOG(x) if(d_lm == Log) { L <<Logger::Warning << x; } else if(d_lm == Store) { d_trace << x; }
+#define LOG(x) if(d_lm == Log) { g_log <<Logger::Warning << x; } else if(d_lm == Store) { d_trace << x; }
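The LOG() macro above routes a streamed log expression either to the global logger (d_lm == Log) or into a per-query trace buffer (d_lm == Store), and drops it otherwise. A self-contained sketch of that two-destination pattern; the class and member names are invented for illustration:

#include <iostream>
#include <sstream>
#include <string>

// Sketch of the two-destination logging macro shown above: depending on the
// mode, LOG("text" << value << endl) goes to the logger, into a string
// buffer for later retrieval, or nowhere at all.
struct TracingResolver {
  enum LogMode { None, Log, Store };
  LogMode d_lm = None;
  std::ostringstream d_trace;

#define LOG(x) if(d_lm == Log) { std::cerr << x; } else if(d_lm == Store) { d_trace << x; }

  void resolveStep(const std::string& qname) {
    LOG("resolving " << qname << std::endl);
  }
};
#undef LOG

int main() {
  TracingResolver res;
  res.d_lm = TracingResolver::Store;   // capture the trace instead of logging it
  res.resolveStep("www.example.com");
  std::cout << "captured trace: " << res.d_trace.str();
}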
static void accountAuthLatency(int usec, int family)
{
return res;
}
catch(const PDNSException& e) {
- L<<Logger::Error<<"Failed to update . records, got an exception: "<<e.reason<<endl;
+ g_log<<Logger::Error<<"Failed to update . records, got an exception: "<<e.reason<<endl;
}
catch(const ImmediateServFailException& e) {
- L<<Logger::Error<<"Failed to update . records, got an exception: "<<e.reason<<endl;
+ g_log<<Logger::Error<<"Failed to update . records, got an exception: "<<e.reason<<endl;
}
catch(const std::exception& e) {
- L<<Logger::Error<<"Failed to update . records, got an exception: "<<e.what()<<endl;
+ g_log<<Logger::Error<<"Failed to update . records, got an exception: "<<e.what()<<endl;
}
catch(...) {
- L<<Logger::Error<<"Failed to update . records, got an exception"<<endl;
+ g_log<<Logger::Error<<"Failed to update . records, got an exception"<<endl;
}
if(!res) {
- L<<Logger::Notice<<"Refreshed . records"<<endl;
+ g_log<<Logger::Notice<<"Refreshed . records"<<endl;
}
else
- L<<Logger::Error<<"Failed to update . records, RCODE="<<res<<endl;
+ g_log<<Logger::Error<<"Failed to update . records, RCODE="<<res<<endl;
return res;
}
void TCPNameserver::go()
{
- L<<Logger::Error<<"Creating backend connection for TCP"<<endl;
+ g_log<<Logger::Error<<"Creating backend connection for TCP"<<endl;
s_P=0;
try {
s_P=new PacketHandler;
}
catch(PDNSException &ae) {
- L<<Logger::Error<<"TCP server is unable to launch backends - will try again when questions come in: "<<ae.reason<<endl;
+ g_log<<Logger::Error<<"TCP server is unable to launch backends - will try again when questions come in: "<<ae.reason<<endl;
}
pthread_create(&d_tid, 0, launcher, static_cast<void *>(this));
}
pthread_detach(pthread_self());
if(getpeername(fd, (struct sockaddr *)&remote, &remotelen) < 0) {
- L<<Logger::Warning<<"Received question from socket which had no remote address, dropping ("<<stringerror()<<")"<<endl;
+ g_log<<Logger::Warning<<"Received question from socket which had no remote address, dropping ("<<stringerror()<<")"<<endl;
d_connectionroom_sem->post();
try {
closesocket(fd);
}
catch(const PDNSException& e) {
- L<<Logger::Error<<"Error closing TCP socket: "<<e.reason<<endl;
+ g_log<<Logger::Error<<"Error closing TCP socket: "<<e.reason<<endl;
}
return 0;
}
int mesgsize=65535;
scoped_array<char> mesg(new char[mesgsize]);
- DLOG(L<<"TCP Connection accepted on fd "<<fd<<endl);
+ DLOG(g_log<<"TCP Connection accepted on fd "<<fd<<endl);
bool logDNSQueries= ::arg().mustDo("log-dns-queries");
for(;;) {
unsigned int remainingTime = 0;
transactions++;
if (d_maxTransactionsPerConn && transactions > d_maxTransactionsPerConn) {
- L << Logger::Notice<<"TCP Remote "<< remote <<" exceeded the number of transactions per connection, dropping.";
+ g_log << Logger::Notice<<"TCP Remote "<< remote <<" exceeded the number of transactions per connection, dropping.";
break;
}
if (maxConnectionDurationReached(d_maxConnectionDuration, start, remainingTime)) {
- L << Logger::Notice<<"TCP Remote "<< remote <<" exceeded the maximum TCP connection duration, dropping.";
+ g_log << Logger::Notice<<"TCP Remote "<< remote <<" exceeded the maximum TCP connection duration, dropping.";
break;
}
// do not remove this check as it will catch if someone
// decreases the mesg buffer size for some reason.
if(pktlen > mesgsize) {
- L<<Logger::Warning<<"Received an overly large question from "<<remote.toString()<<", dropping"<<endl;
+ g_log<<Logger::Warning<<"Received an overly large question from "<<remote.toString()<<", dropping"<<endl;
break;
}
if (maxConnectionDurationReached(d_maxConnectionDuration, start, remainingTime)) {
- L << Logger::Notice<<"TCP Remote "<< remote <<" exceeded the maximum TCP connection duration, dropping.";
+ g_log << Logger::Notice<<"TCP Remote "<< remote <<" exceeded the maximum TCP connection duration, dropping.";
break;
}
remote_text = packet->getRemote().toString() + "<-" + packet->getRealRemote().toString();
else
remote_text = packet->getRemote().toString();
- L << Logger::Notice<<"TCP Remote "<< remote_text <<" wants '" << packet->qdomain<<"|"<<packet->qtype.getName() <<
+ g_log << Logger::Notice<<"TCP Remote "<< remote_text <<" wants '" << packet->qdomain<<"|"<<packet->qtype.getName() <<
"', do = " <<packet->d_dnssecOk <<", bufsize = "<< packet->getMaxReplyLen()<<": ";
}
if(packet->couldBeCached() && PC.get(packet.get(), cached.get())) { // short circuit - does the PacketCache recognize this question?
if(logDNSQueries)
- L<<"packetcache HIT"<<endl;
+ g_log<<"packetcache HIT"<<endl;
cached->setRemote(&packet->d_remote);
cached->d.id=packet->d.id;
cached->d.rd=packet->d.rd; // copy in recursion desired bit
continue;
}
if(logDNSQueries)
- L<<"packetcache MISS"<<endl;
+ g_log<<"packetcache MISS"<<endl;
{
Lock l(&s_plock);
if(!s_P) {
- L<<Logger::Error<<"TCP server is without backend connections, launching"<<endl;
+ g_log<<Logger::Error<<"TCP server is without backend connections, launching"<<endl;
s_P=new PacketHandler;
}
Lock l(&s_plock);
delete s_P;
s_P = 0; // on next call, backend will be recycled
- L<<Logger::Error<<"TCP nameserver had error, cycling backend: "<<ae.reason<<endl;
+ g_log<<Logger::Error<<"TCP nameserver had error, cycling backend: "<<ae.reason<<endl;
}
catch(NetworkError &e) {
- L<<Logger::Info<<"TCP Connection Thread died because of network error: "<<e.what()<<endl;
+ g_log<<Logger::Info<<"TCP Connection Thread died because of network error: "<<e.what()<<endl;
}
catch(std::exception &e) {
- L<<Logger::Error<<"TCP Connection Thread died because of STL error: "<<e.what()<<endl;
+ g_log<<Logger::Error<<"TCP Connection Thread died because of STL error: "<<e.what()<<endl;
}
catch( ... )
{
- L << Logger::Error << "TCP Connection Thread caught unknown exception." << endl;
+ g_log << Logger::Error << "TCP Connection Thread caught unknown exception." << endl;
}
d_connectionroom_sem->post();
closesocket(fd);
}
catch(const PDNSException& e) {
- L<<Logger::Error<<"Error closing TCP socket: "<<e.reason<<endl;
+ g_log<<Logger::Error<<"Error closing TCP socket: "<<e.reason<<endl;
}
decrementClientCount(remote);
if (q->d_tsig_algo == TSIG_GSS) {
GssContext gssctx(keyname);
if (!gssctx.getPeerPrincipal(q->d_peer_principal)) {
- L<<Logger::Warning<<"Failed to extract peer principal from GSS context with keyname '"<<keyname<<"'"<<endl;
+ g_log<<Logger::Warning<<"Failed to extract peer principal from GSS context with keyname '"<<keyname<<"'"<<endl;
}
}
}
s_P->getBackend()->getDomainMetadata(q->qdomain, "GSS-ALLOW-AXFR-PRINCIPAL", princs);
for(const std::string& princ : princs) {
if (q->d_peer_principal == princ) {
- L<<Logger::Warning<<"AXFR of domain '"<<q->qdomain<<"' allowed: TSIG signed request with authorized principal '"<<q->d_peer_principal<<"' and algorithm 'gss-tsig'"<<endl;
+ g_log<<Logger::Warning<<"AXFR of domain '"<<q->qdomain<<"' allowed: TSIG signed request with authorized principal '"<<q->d_peer_principal<<"' and algorithm 'gss-tsig'"<<endl;
return true;
}
}
- L<<Logger::Warning<<"AXFR of domain '"<<q->qdomain<<"' denied: TSIG signed request with principal '"<<q->d_peer_principal<<"' and algorithm 'gss-tsig' is not permitted"<<endl;
+ g_log<<Logger::Warning<<"AXFR of domain '"<<q->qdomain<<"' denied: TSIG signed request with principal '"<<q->d_peer_principal<<"' and algorithm 'gss-tsig' is not permitted"<<endl;
return false;
}
if(!dk.TSIGGrantsAccess(q->qdomain, keyname)) {
- L<<Logger::Error<<"AXFR '"<<q->qdomain<<"' denied: key with name '"<<keyname<<"' and algorithm '"<<getTSIGAlgoName(q->d_tsig_algo)<<"' does not grant access to zone"<<endl;
+ g_log<<Logger::Error<<"AXFR '"<<q->qdomain<<"' denied: key with name '"<<keyname<<"' and algorithm '"<<getTSIGAlgoName(q->d_tsig_algo)<<"' does not grant access to zone"<<endl;
return false;
}
else {
- L<<Logger::Warning<<"AXFR of domain '"<<q->qdomain<<"' allowed: TSIG signed request with authorized key '"<<keyname<<"' and algorithm '"<<getTSIGAlgoName(q->d_tsig_algo)<<"'"<<endl;
+ g_log<<Logger::Warning<<"AXFR of domain '"<<q->qdomain<<"' allowed: TSIG signed request with authorized key '"<<keyname<<"' and algorithm '"<<getTSIGAlgoName(q->d_tsig_algo)<<"'"<<endl;
return true;
}
}
// cerr<<"checking allow-axfr-ips"<<endl;
if(!(::arg()["allow-axfr-ips"].empty()) && d_ng.match( (ComboAddress *) &q->d_remote )) {
- L<<Logger::Warning<<"AXFR of domain '"<<q->qdomain<<"' allowed: client IP "<<q->getRemote()<<" is in allow-axfr-ips"<<endl;
+ g_log<<Logger::Warning<<"AXFR of domain '"<<q->qdomain<<"' allowed: client IP "<<q->getRemote()<<" is in allow-axfr-ips"<<endl;
return true;
}
if(*k == q->getRemote().toString())
{
// cerr<<"got AUTO-NS hit"<<endl;
- L<<Logger::Warning<<"AXFR of domain '"<<q->qdomain<<"' allowed: client IP "<<q->getRemote()<<" is in NSset"<<endl;
+ g_log<<Logger::Warning<<"AXFR of domain '"<<q->qdomain<<"' allowed: client IP "<<q->getRemote()<<" is in NSset"<<endl;
return true;
}
}
Netmask nm = Netmask(*i);
if(nm.match( (ComboAddress *) &q->d_remote ))
{
- L<<Logger::Warning<<"AXFR of domain '"<<q->qdomain<<"' allowed: client IP "<<q->getRemote()<<" is in per-domain ACL"<<endl;
+ g_log<<Logger::Warning<<"AXFR of domain '"<<q->qdomain<<"' allowed: client IP "<<q->getRemote()<<" is in per-domain ACL"<<endl;
// cerr<<"hit!"<<endl;
return true;
}
extern CommunicatorClass Communicator;
if(Communicator.justNotified(q->qdomain, q->getRemote().toString())) { // we just notified this ip
- L<<Logger::Warning<<"Approved AXFR of '"<<q->qdomain<<"' from recently notified slave "<<q->getRemote()<<endl;
+ g_log<<Logger::Warning<<"Approved AXFR of '"<<q->qdomain<<"' from recently notified slave "<<q->getRemote()<<endl;
return true;
}
- L<<Logger::Error<<"AXFR of domain '"<<q->qdomain<<"' denied: client IP "<<q->getRemote()<<" has no permission"<<endl;
+ g_log<<Logger::Error<<"AXFR of domain '"<<q->qdomain<<"' denied: client IP "<<q->getRemote()<<" has no permission"<<endl;
return false;
}
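The canDoAXFR() fragments above walk an ordered list of grounds for allowing a zone transfer: a TSIG-signed request decides on its own (allowed only if the key or GSS principal grants the zone), then a client address in allow-axfr-ips, then an address found in the zone's NS set or per-domain ACL, and finally a slave we ourselves notified recently; anything else is refused. A compressed sketch of that decision chain, with the predicate names invented for illustration:

#include <string>

// Compressed sketch of the AXFR permission chain shown above. The booleans
// are placeholders for the real checks (TSIG/GSS validation, allow-axfr-ips,
// NS-set and per-domain ACL metadata, recent NOTIFY bookkeeping).
struct AxfrRequest {
  bool hasValidTsig = false;
  bool tsigKeyGrantsZone = false;
  bool clientInAllowAxfrIps = false;
  bool clientInNsSetOrAcl = false;
  bool clientWasRecentlyNotified = false;
};

static bool canDoAXFR(const AxfrRequest& req)
{
  if (req.hasValidTsig)               // TSIG decides on its own, allow or deny
    return req.tsigKeyGrantsZone;
  if (req.clientInAllowAxfrIps)       // global IP allow list
    return true;
  if (req.clientInNsSetOrAcl)         // zone's NS set or per-domain ACL
    return true;
  if (req.clientWasRecentlyNotified)  // slave we notified ourselves
    return true;
  return false;                       // default: refuse
}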
if(q->d_dnssecOk)
outpacket->d_dnssecOk=true; // RFC 5936, 2.2.5 'SHOULD'
- L<<Logger::Error<<"AXFR of domain '"<<target<<"' initiated by "<<q->getRemote()<<endl;
+ g_log<<Logger::Error<<"AXFR of domain '"<<target<<"' initiated by "<<q->getRemote()<<endl;
// determine if zone exists and AXFR is allowed using existing backend before spawning a new backend.
SOAData sd;
{
Lock l(&s_plock);
- DLOG(L<<"Looking for SOA"<<endl); // find domain_id via SOA and list complete domain. No SOA, no AXFR
+ DLOG(g_log<<"Looking for SOA"<<endl); // find domain_id via SOA and list complete domain. No SOA, no AXFR
if(!s_P) {
- L<<Logger::Error<<"TCP server is without backend connections in doAXFR, launching"<<endl;
+ g_log<<Logger::Error<<"TCP server is without backend connections in doAXFR, launching"<<endl;
s_P=new PacketHandler;
}
if (!canDoAXFR(q)) {
- L<<Logger::Error<<"AXFR of domain '"<<target<<"' failed: "<<q->getRemote()<<" may not request AXFR"<<endl;
+ g_log<<Logger::Error<<"AXFR of domain '"<<target<<"' failed: "<<q->getRemote()<<" may not request AXFR"<<endl;
outpacket->setRcode(RCode::NotAuth);
sendPacket(outpacket,outsock);
return 0;
// canDoAXFR does all the ACL checks, and has the if(disable-axfr) shortcut, call it first.
if(!s_P->getBackend()->getSOAUncached(target, sd)) {
- L<<Logger::Error<<"AXFR of domain '"<<target<<"' failed: not authoritative"<<endl;
+ g_log<<Logger::Error<<"AXFR of domain '"<<target<<"' failed: not authoritative"<<endl;
outpacket->setRcode(RCode::NotAuth);
sendPacket(outpacket,outsock);
return 0;
UeberBackend db;
if(!db.getSOAUncached(target, sd)) {
- L<<Logger::Error<<"AXFR of domain '"<<target<<"' failed: not authoritative in second instance"<<endl;
+ g_log<<Logger::Error<<"AXFR of domain '"<<target<<"' failed: not authoritative in second instance"<<endl;
outpacket->setRcode(RCode::NotAuth);
sendPacket(outpacket,outsock);
return 0;
if(securedZone && dk.getNSEC3PARAM(target, &ns3pr, &narrow)) {
NSEC3Zone=true;
if(narrow) {
- L<<Logger::Error<<"Not doing AXFR of an NSEC3 narrow zone '"<<target<<"' for "<<q->getRemote()<<endl;
+ g_log<<Logger::Error<<"Not doing AXFR of an NSEC3 narrow zone '"<<target<<"' for "<<q->getRemote()<<endl;
noAXFRBecauseOfNSEC3Narrow=true;
}
}
if(noAXFRBecauseOfNSEC3Narrow) {
- L<<Logger::Error<<"AXFR of domain '"<<target<<"' denied to "<<q->getRemote()<<endl;
+ g_log<<Logger::Error<<"AXFR of domain '"<<target<<"' denied to "<<q->getRemote()<<endl;
outpacket->setRcode(RCode::Refused);
// FIXME: should actually figure out if we are auth over a zone, and send out 9 if we aren't
sendPacket(outpacket,outsock);
if (algorithm != DNSName("gss-tsig")) {
Lock l(&s_plock);
if(!s_P->getBackend()->getTSIGKey(tsigkeyname, &algorithm, &tsig64)) {
- L<<Logger::Error<<"TSIG key '"<<tsigkeyname<<"' for domain '"<<target<<"' not found"<<endl;
+ g_log<<Logger::Error<<"TSIG key '"<<tsigkeyname<<"' for domain '"<<target<<"' not found"<<endl;
return 0;
}
if (B64Decode(tsig64, tsigsecret) == -1) {
- L<<Logger::Error<<"Unable to Base-64 decode TSIG key '"<<tsigkeyname<<"' for domain '"<<target<<"'"<<endl;
+ g_log<<Logger::Error<<"Unable to Base-64 decode TSIG key '"<<tsigkeyname<<"' for domain '"<<target<<"'"<<endl;
return 0;
}
}
UeberBackend signatureDB;
// SOA *must* go out first, our signing pipe might reorder
- DLOG(L<<"Sending out SOA"<<endl);
+ DLOG(g_log<<"Sending out SOA"<<endl);
DNSZoneRecord soa = makeEditedDNSZRFromSOAData(dk, sd);
outpacket->addRecord(soa);
if(securedZone && !presignedZone) {
// now start list zone
if(!(sd.db->list(target, sd.domain_id))) {
- L<<Logger::Error<<"Backend signals error condition"<<endl;
+ g_log<<Logger::Error<<"Backend signals error condition"<<endl;
outpacket->setRcode(RCode::ServFail);
sendPacket(outpacket,outsock);
return 0;
int ret1 = stubDoResolve(getRR<ALIASRecordContent>(zrr.dr)->d_content, QType::A, ips);
int ret2 = stubDoResolve(getRR<ALIASRecordContent>(zrr.dr)->d_content, QType::AAAA, ips);
if(ret1 != RCode::NoError || ret2 != RCode::NoError) {
- L<<Logger::Error<<"Error resolving for ALIAS "<<zrr.dr.d_content->getZoneRepresentation()<<", aborting AXFR"<<endl;
+ g_log<<Logger::Error<<"Error resolving for ALIAS "<<zrr.dr.d_content->getZoneRepresentation()<<", aborting AXFR"<<endl;
outpacket->setRcode(RCode::ServFail);
sendPacket(outpacket,outsock);
return 0;
zrrs.push_back(zrr);
} else {
if (zrr.dr.d_type)
- L<<Logger::Warning<<"Zone '"<<target<<"' contains out-of-zone data '"<<zrr.dr.d_name<<"|"<<DNSRecordContent::NumberToType(zrr.dr.d_type)<<"', ignoring"<<endl;
+ g_log<<Logger::Warning<<"Zone '"<<target<<"' contains out-of-zone data '"<<zrr.dr.d_name<<"|"<<DNSRecordContent::NumberToType(zrr.dr.d_type)<<"', ignoring"<<endl;
}
}
while(shorter != target && shorter.chopOff()) {
if(!qnames.count(shorter) && !nonterm.count(shorter) && nsec3set.count(shorter)) {
if(!(maxent)) {
- L<<Logger::Warning<<"Zone '"<<target<<"' has too many empty non terminals."<<endl;
+ g_log<<Logger::Warning<<"Zone '"<<target<<"' has too many empty non terminals."<<endl;
return 0;
}
nonterm.insert(shorter);
udiff=dt.udiffNoReset();
if(securedZone)
- L<<Logger::Info<<"Done signing: "<<csp.d_signed/(udiff/1000000.0)<<" sigs/s, "<<endl;
+ g_log<<Logger::Info<<"Done signing: "<<csp.d_signed/(udiff/1000000.0)<<" sigs/s, "<<endl;
- DLOG(L<<"Done writing out records"<<endl);
+ DLOG(g_log<<"Done writing out records"<<endl);
/* and terminate with yet again the SOA record */
outpacket=getFreshAXFRPacket(q);
outpacket->addRecord(soa);
sendPacket(outpacket, outsock);
- DLOG(L<<"last packet - close"<<endl);
- L<<Logger::Error<<"AXFR of domain '"<<target<<"' to "<<q->getRemote()<<" finished"<<endl;
+ DLOG(g_log<<"last packet - close"<<endl);
+ g_log<<Logger::Error<<"AXFR of domain '"<<target<<"' to "<<q->getRemote()<<" finished"<<endl;
return 1;
}
serial=pdns_stou(parts[2]);
}
catch(const std::out_of_range& oor) {
- L<<Logger::Error<<"Invalid serial in IXFR query"<<endl;
+ g_log<<Logger::Error<<"Invalid serial in IXFR query"<<endl;
outpacket->setRcode(RCode::FormErr);
sendPacket(outpacket,outsock);
return 0;
}
} else {
- L<<Logger::Error<<"No serial in IXFR query"<<endl;
+ g_log<<Logger::Error<<"No serial in IXFR query"<<endl;
outpacket->setRcode(RCode::FormErr);
sendPacket(outpacket,outsock);
return 0;
}
} else if (rr->d_type != QType::TSIG && rr->d_type != QType::OPT) {
- L<<Logger::Error<<"Additional records in IXFR query, type: "<<QType(rr->d_type).getName()<<endl;
+ g_log<<Logger::Error<<"Additional records in IXFR query, type: "<<QType(rr->d_type).getName()<<endl;
outpacket->setRcode(RCode::FormErr);
sendPacket(outpacket,outsock);
return 0;
}
}
- L<<Logger::Error<<"IXFR of domain '"<<q->qdomain<<"' initiated by "<<q->getRemote()<<" with serial "<<serial<<endl;
+ g_log<<Logger::Error<<"IXFR of domain '"<<q->qdomain<<"' initiated by "<<q->getRemote()<<" with serial "<<serial<<endl;
// determine if zone exists and AXFR is allowed using existing backend before spawning a new backend.
SOAData sd;
{
Lock l(&s_plock);
- DLOG(L<<"Looking for SOA"<<endl); // find domain_id via SOA and list complete domain. No SOA, no IXFR
+ DLOG(g_log<<"Looking for SOA"<<endl); // find domain_id via SOA and list complete domain. No SOA, no IXFR
if(!s_P) {
- L<<Logger::Error<<"TCP server is without backend connections in doIXFR, launching"<<endl;
+ g_log<<Logger::Error<<"TCP server is without backend connections in doIXFR, launching"<<endl;
s_P=new PacketHandler;
}
// canDoAXFR does all the ACL checks, and has the if(disable-axfr) shortcut, call it first.
if(!canDoAXFR(q) || !s_P->getBackend()->getSOAUncached(q->qdomain, sd)) {
- L<<Logger::Error<<"IXFR of domain '"<<q->qdomain<<"' failed: not authoritative"<<endl;
+ g_log<<Logger::Error<<"IXFR of domain '"<<q->qdomain<<"' failed: not authoritative"<<endl;
outpacket->setRcode(RCode::NotAuth);
sendPacket(outpacket,outsock);
return 0;
bool securedZone = dk.isSecuredZone(q->qdomain);
if(dk.getNSEC3PARAM(q->qdomain, &ns3pr, &narrow)) {
if(narrow) {
- L<<Logger::Error<<"Not doing IXFR of an NSEC3 narrow zone."<<endl;
- L<<Logger::Error<<"IXFR of domain '"<<q->qdomain<<"' denied to "<<q->getRemote()<<endl;
+ g_log<<Logger::Error<<"Not doing IXFR of an NSEC3 narrow zone."<<endl;
+ g_log<<Logger::Error<<"IXFR of domain '"<<q->qdomain<<"' denied to "<<q->getRemote()<<endl;
outpacket->setRcode(RCode::Refused);
sendPacket(outpacket,outsock);
return 0;
UeberBackend db;
if(!db.getSOAUncached(target, sd)) {
- L<<Logger::Error<<"IXFR of domain '"<<target<<"' failed: not authoritative in second instance"<<endl;
+ g_log<<Logger::Error<<"IXFR of domain '"<<target<<"' failed: not authoritative in second instance"<<endl;
outpacket->setRcode(RCode::NotAuth);
sendPacket(outpacket,outsock);
return 0;
algorithm = DNSName("hmac-md5");
Lock l(&s_plock);
if(!s_P->getBackend()->getTSIGKey(tsigkeyname, &algorithm, &tsig64)) {
- L<<Logger::Error<<"TSIG key '"<<tsigkeyname<<"' for domain '"<<target<<"' not found"<<endl;
+ g_log<<Logger::Error<<"TSIG key '"<<tsigkeyname<<"' for domain '"<<target<<"' not found"<<endl;
return 0;
}
if (B64Decode(tsig64, tsigsecret) == -1) {
- L<<Logger::Error<<"Unable to Base-64 decode TSIG key '"<<tsigkeyname<<"' for domain '"<<target<<"'"<<endl;
+ g_log<<Logger::Error<<"Unable to Base-64 decode TSIG key '"<<tsigkeyname<<"' for domain '"<<target<<"'"<<endl;
return 0;
}
}
UeberBackend signatureDB;
// SOA *must* go out first, our signing pipe might reorder
- DLOG(L<<"Sending out SOA"<<endl);
+ DLOG(g_log<<"Sending out SOA"<<endl);
DNSZoneRecord soa = makeEditedDNSZRFromSOAData(dk, sd);
outpacket->addRecord(soa);
if(securedZone) {
sendPacket(outpacket, outsock);
- L<<Logger::Error<<"IXFR of domain '"<<target<<"' to "<<q->getRemote()<<" finished"<<endl;
+ g_log<<Logger::Error<<"IXFR of domain '"<<target<<"' to "<<q->getRemote()<<" finished"<<endl;
return 1;
}
- L<<Logger::Error<<"IXFR fallback to AXFR for domain '"<<target<<"' our serial "<<sd.serial<<endl;
+ g_log<<Logger::Error<<"IXFR fallback to AXFR for domain '"<<target<<"' our serial "<<sd.serial<<endl;
return doAXFR(q->qdomain, q, outsock);
}
int tmp=1;
if(setsockopt(s,SOL_SOCKET,SO_REUSEADDR,(char*)&tmp,sizeof tmp)<0) {
- L<<Logger::Error<<"Setsockopt failed"<<endl;
+ g_log<<Logger::Error<<"Setsockopt failed"<<endl;
_exit(1);
}
#ifdef TCP_FASTOPEN
int fastOpenQueueSize = ::arg().asNum("tcp-fast-open");
if (setsockopt(s, IPPROTO_TCP, TCP_FASTOPEN, &fastOpenQueueSize, sizeof fastOpenQueueSize) < 0) {
- L<<Logger::Error<<"Failed to enable TCP Fast Open for listening socket: "<<strerror(errno)<<endl;
+ g_log<<Logger::Error<<"Failed to enable TCP Fast Open for listening socket: "<<strerror(errno)<<endl;
}
#else
- L<<Logger::Warning<<"TCP Fast Open configured but not supported for listening socket"<<endl;
+ g_log<<Logger::Warning<<"TCP Fast Open configured but not supported for listening socket"<<endl;
#endif
}
if(::bind(s, (sockaddr*)&local, local.getSocklen())<0) {
close(s);
if( errno == EADDRNOTAVAIL && ! ::arg().mustDo("local-address-nonexist-fail") ) {
- L<<Logger::Error<<"IPv4 Address " << *laddr << " does not exist on this server - skipping TCP bind" << endl;
+ g_log<<Logger::Error<<"IPv4 Address " << *laddr << " does not exist on this server - skipping TCP bind" << endl;
continue;
} else {
- L<<Logger::Error<<"Unable to bind to TCP socket " << *laddr << ": "<<strerror(errno)<<endl;
+ g_log<<Logger::Error<<"Unable to bind to TCP socket " << *laddr << ": "<<strerror(errno)<<endl;
throw PDNSException("Unable to bind to TCP socket");
}
}
listen(s,128);
- L<<Logger::Error<<"TCP server bound to "<<local.toStringWithPort()<<endl;
+ g_log<<Logger::Error<<"TCP server bound to "<<local.toStringWithPort()<<endl;
d_sockets.push_back(s);
struct pollfd pfd;
memset(&pfd, 0, sizeof(pfd));
int tmp=1;
if(setsockopt(s,SOL_SOCKET,SO_REUSEADDR,(char*)&tmp,sizeof tmp)<0) {
- L<<Logger::Error<<"Setsockopt failed"<<endl;
+ g_log<<Logger::Error<<"Setsockopt failed"<<endl;
_exit(1);
}
#ifdef TCP_FASTOPEN
int fastOpenQueueSize = ::arg().asNum("tcp-fast-open");
if (setsockopt(s, IPPROTO_TCP, TCP_FASTOPEN, &fastOpenQueueSize, sizeof fastOpenQueueSize) < 0) {
- L<<Logger::Error<<"Failed to enable TCP Fast Open for listening socket: "<<strerror(errno)<<endl;
+ g_log<<Logger::Error<<"Failed to enable TCP Fast Open for listening socket: "<<strerror(errno)<<endl;
}
#else
- L<<Logger::Warning<<"TCP Fast Open configured but not supported for listening socket"<<endl;
+ g_log<<Logger::Warning<<"TCP Fast Open configured but not supported for listening socket"<<endl;
#endif
}
if( ::arg().mustDo("non-local-bind") )
Utility::setBindAny(AF_INET6, s);
if(setsockopt(s, IPPROTO_IPV6, IPV6_V6ONLY, &tmp, sizeof(tmp)) < 0) {
- L<<Logger::Error<<"Failed to set IPv6 socket to IPv6 only, continuing anyhow: "<<strerror(errno)<<endl;
+ g_log<<Logger::Error<<"Failed to set IPv6 socket to IPv6 only, continuing anyhow: "<<strerror(errno)<<endl;
}
if(bind(s, (const sockaddr*)&local, local.getSocklen())<0) {
close(s);
if( errno == EADDRNOTAVAIL && ! ::arg().mustDo("local-ipv6-nonexist-fail") ) {
- L<<Logger::Error<<"IPv6 Address " << *laddr << " does not exist on this server - skipping TCP bind" << endl;
+ g_log<<Logger::Error<<"IPv6 Address " << *laddr << " does not exist on this server - skipping TCP bind" << endl;
continue;
} else {
- L<<Logger::Error<<"Unable to bind to TCPv6 socket" << *laddr << ": "<<strerror(errno)<<endl;
+ g_log<<Logger::Error<<"Unable to bind to TCPv6 socket" << *laddr << ": "<<strerror(errno)<<endl;
throw PDNSException("Unable to bind to TCPv6 socket");
}
}
listen(s,128);
- L<<Logger::Error<<"TCPv6 server bound to "<<local.toStringWithPort()<<endl; // this gets %eth0 right
+ g_log<<Logger::Error<<"TCPv6 server bound to "<<local.toStringWithPort()<<endl; // this gets %eth0 right
d_sockets.push_back(s);
struct pollfd pfd;
addrlen=remote.getSocklen();
if((fd=accept(sock, (sockaddr*)&remote, &addrlen))<0) {
- L<<Logger::Error<<"TCP question accept error: "<<strerror(errno)<<endl;
+ g_log<<Logger::Error<<"TCP question accept error: "<<strerror(errno)<<endl;
if(errno==EMFILE) {
- L<<Logger::Error<<"TCP handler out of filedescriptors, exiting, won't recover from this"<<endl;
+ g_log<<Logger::Error<<"TCP handler out of filedescriptors, exiting, won't recover from this"<<endl;
_exit(1);
}
}
if (d_maxConnectionsPerClient) {
std::lock_guard<std::mutex> lock(s_clientsCountMutex);
if (s_clientsCount[remote] >= d_maxConnectionsPerClient) {
- L<<Logger::Notice<<"Limit of simultaneous TCP connections per client reached for "<< remote<<", dropping"<<endl;
+ g_log<<Logger::Notice<<"Limit of simultaneous TCP connections per client reached for "<< remote<<", dropping"<<endl;
close(fd);
continue;
}
int room;
d_connectionroom_sem->getValue( &room);
if(room<1)
- L<<Logger::Warning<<"Limit of simultaneous TCP connections reached - raise max-tcp-connections"<<endl;
+ g_log<<Logger::Warning<<"Limit of simultaneous TCP connections reached - raise max-tcp-connections"<<endl;
if(pthread_create(&tid, 0, &doConnection, reinterpret_cast<void*>(fd))) {
- L<<Logger::Error<<"Error creating thread: "<<stringerror()<<endl;
+ g_log<<Logger::Error<<"Error creating thread: "<<stringerror()<<endl;
d_connectionroom_sem->post();
close(fd);
decrementClientCount(remote);
}
}
catch(PDNSException &AE) {
- L<<Logger::Error<<"TCP Nameserver thread dying because of fatal error: "<<AE.reason<<endl;
+ g_log<<Logger::Error<<"TCP Nameserver thread dying because of fatal error: "<<AE.reason<<endl;
}
catch(...) {
- L<<Logger::Error<<"TCPNameserver dying because of an unexpected fatal error"<<endl;
+ g_log<<Logger::Error<<"TCPNameserver dying because of an unexpected fatal error"<<endl;
}
_exit(1); // take rest of server with us
}
try {
std::rethrow_if_nested(e);
} catch(const std::exception& exp) {
- theL()<<"Extra info: "<<exp.what();
+ g_log<<"Extra info: "<<exp.what();
}
}
delete r;
bool sign = false;
if (!p->getTKEYRecord(&tkey_in, &name)) {
- L<<Logger::Error<<"TKEY request but no TKEY RR found"<<endl;
+ g_log<<Logger::Error<<"TKEY request but no TKEY RR found"<<endl;
r->setRcode(RCode::FormErr);
return;
}
//! Loads a module and reports it to all UeberBackend threads
bool UeberBackend::loadmodule(const string &name)
{
- L<<Logger::Warning <<"Loading '"<<name<<"'" << endl;
+ g_log<<Logger::Warning <<"Loading '"<<name<<"'" << endl;
void *dlib=dlopen(name.c_str(), RTLD_NOW);
if(dlib == NULL) {
- L<<Logger::Error <<"Unable to load module '"<<name<<"': "<<dlerror() << endl;
+ g_log<<Logger::Error <<"Unable to load module '"<<name<<"': "<<dlerror() << endl;
return false;
}
cstat = cacheHas(d_question,d_answers);
if(cstat == 1 && !d_answers.empty() && d_cache_ttl) {
- DLOG(L<<Logger::Error<<"has pos cache entry: "<<shorter<<endl);
+ DLOG(g_log<<Logger::Error<<"has pos cache entry: "<<shorter<<endl);
fillSOAData(d_answers[0], *sd);
sd->db = 0;
sd->qname = shorter;
goto found;
} else if(cstat == 0 && d_negcache_ttl) {
- DLOG(L<<Logger::Error<<"has neg cache entry: "<<shorter<<endl);
+ DLOG(g_log<<Logger::Error<<"has neg cache entry: "<<shorter<<endl);
continue;
}
}
vector<pair<size_t, SOAData> >::iterator j = bestmatch.begin();
for(; i != backends.end() && j != bestmatch.end(); ++i, ++j) {
- DLOG(L<<Logger::Error<<"backend: "<<i-backends.begin()<<", qname: "<<shorter<<endl);
+ DLOG(g_log<<Logger::Error<<"backend: "<<i-backends.begin()<<", qname: "<<shorter<<endl);
if(j->first < shorter.wirelength()) {
- DLOG(L<<Logger::Error<<"skipped, we already found a shorter best match in this backend: "<<j->second.qname<<endl);
+ DLOG(g_log<<Logger::Error<<"skipped, we already found a shorter best match in this backend: "<<j->second.qname<<endl);
continue;
} else if(j->first == shorter.wirelength()) {
- DLOG(L<<Logger::Error<<"use shorter best match: "<<j->second.qname<<endl);
+ DLOG(g_log<<Logger::Error<<"use shorter best match: "<<j->second.qname<<endl);
*sd = j->second;
break;
} else {
- DLOG(L<<Logger::Error<<"lookup: "<<shorter<<endl);
+ DLOG(g_log<<Logger::Error<<"lookup: "<<shorter<<endl);
if((*i)->getAuth(shorter, sd)) {
- DLOG(L<<Logger::Error<<"got: "<<sd->qname<<endl);
+ DLOG(g_log<<Logger::Error<<"got: "<<sd->qname<<endl);
j->first = sd->qname.wirelength();
j->second = *sd;
if(sd->qname == shorter) {
break;
}
} else {
- DLOG(L<<Logger::Error<<"no match for: "<<shorter<<endl);
+ DLOG(g_log<<Logger::Error<<"no match for: "<<shorter<<endl);
}
}
}
// Add to cache
if(i == backends.end()) {
if(d_negcache_ttl) {
- DLOG(L<<Logger::Error<<"add neg cache entry:"<<shorter<<endl);
+ DLOG(g_log<<Logger::Error<<"add neg cache entry:"<<shorter<<endl);
d_question.qname=shorter;
addNegCache(d_question);
}
continue;
} else if(d_cache_ttl) {
- DLOG(L<<Logger::Error<<"add pos cache entry: "<<sd->qname<<endl);
+ DLOG(g_log<<Logger::Error<<"add pos cache entry: "<<sd->qname<<endl);
d_question.qtype = QType::SOA;
d_question.qname = sd->qname;
d_question.zoneId = -1;
found:
if(found == (qtype == QType::DS) || target != shorter) {
- DLOG(L<<Logger::Error<<"found: "<<sd->qname<<endl);
+ DLOG(g_log<<Logger::Error<<"found: "<<sd->qname<<endl);
return true;
} else {
- DLOG(L<<Logger::Error<<"chasing next: "<<sd->qname<<endl);
+ DLOG(g_log<<Logger::Error<<"chasing next: "<<sd->qname<<endl);
found = true;
}
}
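The getAuth() fragments above walk the query name from most to least specific (shorter.chopOff()), ask each backend for the closest enclosing SOA, and keep a per-backend best match by wire length so no backend is queried again for names it has already covered, with positive and negative caches in front. A much-reduced sketch of the core longest-suffix search; the map-backed "zone store" is invented for illustration:

#include <iostream>
#include <map>
#include <string>

// Minimal sketch of the longest-suffix zone lookup that getAuth() performs:
// strip one label at a time and stop at the first name a backend is
// authoritative for. The std::map stands in for a real backend.
static bool chopOff(std::string& name)
{
  // "www.example.com" -> "example.com" -> "com" -> "" (then stop)
  if (name.empty())
    return false;
  std::string::size_type dot = name.find('.');
  name = (dot == std::string::npos) ? "" : name.substr(dot + 1);
  return true;
}

int main()
{
  std::map<std::string, int> zones = { {"example.com", 1}, {"com", 2} };

  std::string shorter = "www.example.com";
  do {
    auto it = zones.find(shorter);
    if (it != zones.end()) {
      std::cout << "authoritative zone: " << shorter << " (id " << it->second << ")" << std::endl;
      return 0;
    }
  } while (chopOff(shorter));

  std::cout << "no authoritative zone found" << std::endl;
}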
rrs.clear();
- // L<<Logger::Warning<<"looking up: '"<<q.qname+"'|N|"+q.qtype.getName()+"|"+itoa(q.zoneId)<<endl;
+ // g_log<<Logger::Warning<<"looking up: '"<<q.qname+"'|N|"+q.qtype.getName()+"|"+itoa(q.zoneId)<<endl;
bool ret=QC.getEntry(q.qname, q.qtype, rrs, q.zoneId); // think about lowercasing here
if(!ret) {
UeberBackend::~UeberBackend()
{
- DLOG(L<<Logger::Error<<"UeberBackend destructor called, removing ourselves from instances, and deleting our backends"<<endl);
+ DLOG(g_log<<Logger::Error<<"UeberBackend destructor called, removing ourselves from instances, and deleting our backends"<<endl);
cleanup();
}
void UeberBackend::lookup(const QType &qtype,const DNSName &qname, DNSPacket *pkt_p, int zoneId)
{
if(d_stale) {
- L<<Logger::Error<<"Stale ueberbackend received question, signalling that we want to be recycled"<<endl;
+ g_log<<Logger::Error<<"Stale ueberbackend received question, signalling that we want to be recycled"<<endl;
throw PDNSException("We are stale, please recycle");
}
- DLOG(L<<"UeberBackend received question for "<<qtype.getName()<<" of "<<qname<<endl);
+ DLOG(g_log<<"UeberBackend received question for "<<qtype.getName()<<" of "<<qname<<endl);
if(!d_go) {
pthread_mutex_lock(&d_mut);
while (d_go==false) {
- L<<Logger::Error<<"UeberBackend is blocked, waiting for 'go'"<<endl;
+ g_log<<Logger::Error<<"UeberBackend is blocked, waiting for 'go'"<<endl;
pthread_cond_wait(&d_cond, &d_mut);
- L<<Logger::Error<<"Broadcast received, unblocked"<<endl;
+ g_log<<Logger::Error<<"Broadcast received, unblocked"<<endl;
}
pthread_mutex_unlock(&d_mut);
}
d_ancount=0;
if(!backends.size()) {
- L<<Logger::Error<<"No database backends available - unable to answer questions."<<endl;
+ g_log<<Logger::Error<<"No database backends available - unable to answer questions."<<endl;
d_stale=true; // please recycle us!
throw PDNSException("We are stale, please recycle");
}
UeberBackend::handle::handle()
{
- // L<<Logger::Warning<<"Handle instances: "<<instances<<endl;
+ // g_log<<Logger::Warning<<"Handle instances: "<<instances<<endl;
++instances;
parent=NULL;
d_hinterBackend=NULL;
bool UeberBackend::handle::get(DNSZoneRecord &r)
{
- DLOG(L << "Ueber get() was called for a "<<qtype.getName()<<" record" << endl);
+ DLOG(g_log << "Ueber get() was called for a "<<qtype.getName()<<" record" << endl);
bool isMore=false;
while(d_hinterBackend && !(isMore=d_hinterBackend->get(r))) { // this backend out of answers
if(i<parent->backends.size()) {
- DLOG(L<<"Backend #"<<i<<" of "<<parent->backends.size()
+ DLOG(g_log<<"Backend #"<<i<<" of "<<parent->backends.size()
<<" out of answers, taking next"<<endl);
d_hinterBackend=parent->backends[i++];
else
break;
- DLOG(L<<"Now asking backend #"<<i<<endl);
+ DLOG(g_log<<"Now asking backend #"<<i<<endl);
}
if(!isMore && i==parent->backends.size()) {
- DLOG(L<<"UeberBackend reached end of backends"<<endl);
+ DLOG(g_log<<"UeberBackend reached end of backends"<<endl);
return false;
}
- DLOG(L<<"Found an answering backend - will not try another one"<<endl);
+ DLOG(g_log<<"Found an answering backend - will not try another one"<<endl);
i=parent->backends.size(); // don't go on to the next backend
return true;
}
{
m_pSemaphore=new sem_t;
if (sem_init(m_pSemaphore, 0, value) == -1) {
- theL() << Logger::Error << "Cannot create semaphore: " << stringerror() << endl;
+ g_log << Logger::Error << "Cannot create semaphore: " << stringerror() << endl;
exit(1);
}
}
(void) one; // avoids 'unused var' warning on systems that have none of the defines checked below
#ifdef IP_FREEBIND
if (setsockopt(sock, IPPROTO_IP, IP_FREEBIND, &one, sizeof(one)) < 0)
- theL()<<Logger::Warning<<"Warning: IP_FREEBIND setsockopt failed: "<<strerror(errno)<<endl;
+ g_log<<Logger::Warning<<"Warning: IP_FREEBIND setsockopt failed: "<<strerror(errno)<<endl;
#endif
#ifdef IP_BINDANY
if (af == AF_INET)
if (setsockopt(sock, IPPROTO_IP, IP_BINDANY, &one, sizeof(one)) < 0)
- theL()<<Logger::Warning<<"Warning: IP_BINDANY setsockopt failed: "<<strerror(errno)<<endl;
+ g_log<<Logger::Warning<<"Warning: IP_BINDANY setsockopt failed: "<<strerror(errno)<<endl;
#endif
#ifdef IPV6_BINDANY
if (af == AF_INET6)
if (setsockopt(sock, IPPROTO_IPV6, IPV6_BINDANY, &one, sizeof(one)) < 0)
- theL()<<Logger::Warning<<"Warning: IPV6_BINDANY setsockopt failed: "<<strerror(errno)<<endl;
+ g_log<<Logger::Warning<<"Warning: IPV6_BINDANY setsockopt failed: "<<strerror(errno)<<endl;
#endif
#ifdef SO_BINDANY
if (setsockopt(sock, SOL_SOCKET, SO_BINDANY, &one, sizeof(one)) < 0)
- theL()<<Logger::Warning<<"Warning: SO_BINDANY setsockopt failed: "<<strerror(errno)<<endl;
+ g_log<<Logger::Warning<<"Warning: SO_BINDANY setsockopt failed: "<<strerror(errno)<<endl;
#endif
}
{
if(gid) {
if(setgid(gid)<0) {
- theL()<<Logger::Critical<<"Unable to set effective group id to "<<gid<<": "<<stringerror()<<endl;
+ g_log<<Logger::Critical<<"Unable to set effective group id to "<<gid<<": "<<stringerror()<<endl;
exit(1);
}
else
- theL()<<Logger::Info<<"Set effective group id to "<<gid<<endl;
+ g_log<<Logger::Info<<"Set effective group id to "<<gid<<endl;
struct passwd *pw=getpwuid(uid);
if(!pw) {
- theL()<<Logger::Warning<<"Unable to determine user name for uid "<<uid<<endl;
+ g_log<<Logger::Warning<<"Unable to determine user name for uid "<<uid<<endl;
if (setgroups(0, NULL)<0) {
- theL()<<Logger::Critical<<"Unable to drop supplementary gids: "<<stringerror()<<endl;
+ g_log<<Logger::Critical<<"Unable to drop supplementary gids: "<<stringerror()<<endl;
exit(1);
}
} else {
if (initgroups(pw->pw_name, gid)<0) {
- theL()<<Logger::Critical<<"Unable to set supplementary groups: "<<stringerror()<<endl;
+ g_log<<Logger::Critical<<"Unable to set supplementary groups: "<<stringerror()<<endl;
exit(1);
}
}
{
if(uid) {
if(setuid(uid)<0) {
- theL()<<Logger::Critical<<"Unable to set effective user id to "<<uid<<": "<<stringerror()<<endl;
+ g_log<<Logger::Critical<<"Unable to set effective user id to "<<uid<<": "<<stringerror()<<endl;
exit(1);
}
else
- theL()<<Logger::Info<<"Set effective user id to "<<uid<<endl;
+ g_log<<Logger::Info<<"Set effective user id to "<<uid<<endl;
}
}
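The privilege-dropping fragments above follow the conventional order: change the group id and the supplementary group list first, and only then the user id, because once the process has switched to an unprivileged uid it can no longer adjust its gids. A condensed sketch of that order, with error handling reduced to perror()+exit():

#include <cstdio>
#include <cstdlib>
#include <grp.h>
#include <pwd.h>
#include <unistd.h>

// Condensed sketch of the group-then-user privilege drop shown above.
static void dropPrivs(uid_t uid, gid_t gid)
{
  if (gid) {
    if (setgid(gid) < 0) { perror("setgid"); exit(1); }        // primary group first
    struct passwd* pw = getpwuid(uid);
    if (!pw) {                                                  // unknown user: clear supplementary groups
      if (setgroups(0, nullptr) < 0) { perror("setgroups"); exit(1); }
    } else if (initgroups(pw->pw_name, gid) < 0) {              // else load the user's supplementary groups
      perror("initgroups"); exit(1);
    }
  }
  if (uid && setuid(uid) < 0) { perror("setuid"); exit(1); }    // user id last
}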
errno=0;
struct group *gr=getgrnam(group.c_str());
if(!gr) {
- theL()<<Logger::Critical<<"Unable to look up gid of group '"<<group<<"': "<< (errno ? strerror(errno) : "not found") <<endl;
+ g_log<<Logger::Critical<<"Unable to look up gid of group '"<<group<<"': "<< (errno ? strerror(errno) : "not found") <<endl;
exit(1);
}
newgid=gr->gr_gid;
if(!(newuid=atoi(username.c_str()))) {
struct passwd *pw=getpwnam(username.c_str());
if(!pw) {
- theL()<<Logger::Critical<<"Unable to look up uid of user '"<<username<<"': "<< (errno ? strerror(errno) : "not found") <<endl;
+ g_log<<Logger::Critical<<"Unable to look up uid of user '"<<username<<"': "<< (errno ? strerror(errno) : "not found") <<endl;
exit(1);
}
newuid=pw->pw_uid;
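
// The hunks above rely on a fixed order when dropping privileges: change the
// group id and supplementary groups while the process is still privileged, and
// call setuid() last, because once the user id is dropped the groups can no
// longer be changed. A condensed sketch, with illustrative names and g_log,
// Logger and endl assumed from logger.hh:
#include <sys/types.h>
#include <unistd.h>
#include <grp.h>
#include <pwd.h>
#include <cerrno>
#include <cstring>
#include <cstdlib>

static void dropPrivileges(uid_t uid, gid_t gid)
{
  if (gid) {                                     // 1. primary group first
    if (setgid(gid) < 0) {
      g_log << Logger::Critical << "setgid failed: " << strerror(errno) << endl;
      exit(1);
    }
    struct passwd* pw = getpwuid(uid);           // 2. supplementary groups: initgroups()
    if (pw ? initgroups(pw->pw_name, gid) < 0    //    when the user is known, otherwise
           : setgroups(0, nullptr) < 0) {        //    drop them all
      g_log << Logger::Critical << "could not set supplementary groups: " << strerror(errno) << endl;
      exit(1);
    }
  }
  if (uid && setuid(uid) < 0) {                  // 3. user id last
    g_log << Logger::Critical << "setuid failed: " << strerror(errno) << endl;
    exit(1);
  }
}
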
bool warnIfDNSSECDisabled(const string& msg) {
if(g_dnssecmode == DNSSECMode::Off) {
if (!msg.empty())
- L<<Logger::Warning<<msg<<endl;
+ g_log<<Logger::Warning<<msg<<endl;
return true;
}
return false;
bool g_dnssecLOG{false};
uint16_t g_maxNSEC3Iterations{0};
-#define LOG(x) if(g_dnssecLOG) { L <<Logger::Warning << x; }
+#define LOG(x) if(g_dnssecLOG) { g_log <<Logger::Warning << x; }
void dotEdge(DNSName zone, string type1, DNSName name1, string tag1, string type2, DNSName name2, string tag2, string color="");
void dotNode(string type, DNSName name, string tag, string content);
string dotName(string type, DNSName name, string tag);
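
// The LOG(x) macro above only evaluates its stream expression when g_dnssecLOG
// is true, so DNSSEC trace output costs nothing in normal operation. A made-up
// call site, assuming DNSName from the surrounding code:
static void traceValidationStart(const DNSName& zone)
{
  LOG("Starting validation for zone '" << zone.toLogString() << "'" << endl);
}
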
void showProductVersion()
{
- theL()<<Logger::Warning<<productName()<<" "<< VERSION << " (C) 2001-2018 "
+ g_log<<Logger::Warning<<productName()<<" "<< VERSION << " (C) 2001-2018 "
"PowerDNS.COM BV" << endl;
- theL()<<Logger::Warning<<"Using "<<(sizeof(unsigned long)*8)<<"-bits mode. "
+ g_log<<Logger::Warning<<"Using "<<(sizeof(unsigned long)*8)<<"-bits mode. "
"Built using " << compilerVersion()
#ifndef REPRODUCIBLE
<<" on " __DATE__ " " __TIME__ " by " BUILD_HOST
#endif
<<"."<< endl;
- theL()<<Logger::Warning<<"PowerDNS comes with ABSOLUTELY NO WARRANTY. "
+ g_log<<Logger::Warning<<"PowerDNS comes with ABSOLUTELY NO WARRANTY. "
"This is free software, and you are welcome to redistribute it "
"according to the terms of the GPL version 2." << endl;
}
void showBuildConfiguration()
{
- theL()<<Logger::Warning<<"Features: "<<
+ g_log<<Logger::Warning<<"Features: "<<
#ifdef HAVE_BOTAN
"botan" << BOTAN_VERSION_MAJOR << "." << BOTAN_VERSION_MINOR << " " <<
#endif
endl;
#ifdef PDNS_MODULES
// Authoritative server only: report the backend modules compiled in
- theL()<<Logger::Warning<<"Built-in modules: "<<PDNS_MODULES<<endl;
+ g_log<<Logger::Warning<<"Built-in modules: "<<PDNS_MODULES<<endl;
#endif
#ifdef PDNS_CONFIG_ARGS
#define double_escape(s) #s
#define escape_quotes(s) double_escape(s)
- theL()<<Logger::Warning<<"Configured with: "<<escape_quotes(PDNS_CONFIG_ARGS)<<endl;
+ g_log<<Logger::Warning<<"Configured with: "<<escape_quotes(PDNS_CONFIG_ARGS)<<endl;
#undef escape_quotes
#undef double_escape
#endif
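
// The two macro levels above are the usual "expand, then stringize" trick:
// escape_quotes(PDNS_CONFIG_ARGS) first expands PDNS_CONFIG_ARGS to the actual
// configure arguments, and only then does double_escape turn them into a string
// literal; a single #s would stringize the macro name itself. A tiny standalone
// demonstration (names and flags below are made up):
#include <iostream>
#define STRINGIZE(s) #s
#define EXPAND_AND_STRINGIZE(s) STRINGIZE(s)
#define EXAMPLE_FLAGS --enable-example --with-modules
int main()
{
  std::cout << STRINGIZE(EXAMPLE_FLAGS) << "\n";            // prints: EXAMPLE_FLAGS
  std::cout << EXPAND_AND_STRINGIZE(EXAMPLE_FLAGS) << "\n";  // prints: --enable-example --with-modules
  return 0;
}
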
{
string err;
if(this->body.empty()) {
- L<<Logger::Debug<<"HTTP: JSON document expected in request body, but body was empty" << endl;
+ g_log<<Logger::Debug<<"HTTP: JSON document expected in request body, but body was empty" << endl;
throw HttpBadRequestException();
}
json11::Json doc = json11::Json::parse(this->body, err);
if (doc.is_null()) {
- L<<Logger::Debug<<"HTTP: parsing of JSON document failed:" << err << endl;
+ g_log<<Logger::Debug<<"HTTP: parsing of JSON document failed:" << err << endl;
throw HttpBadRequestException();
}
return doc;
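
// The JSON handling above follows json11's error convention: Json::parse() takes
// an output string for the error message and returns a null value on failure, so
// "is_null() after parse" is the failure test. A small sketch, reusing the
// HttpBadRequestException seen above (include path may differ per tree):
#include "json11.hpp"
#include <string>

static json11::Json parseJsonBodyOrThrow(const std::string& body)
{
  if (body.empty())
    throw HttpBadRequestException();       // empty body cannot be a JSON document
  std::string err;
  json11::Json doc = json11::Json::parse(body, err);
  if (doc.is_null())
    throw HttpBadRequestException();       // err now holds the parser's message
  return doc;
}
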
resp->headers["access-control-allow-origin"] = "*";
if (api_key.empty()) {
- L<<Logger::Error<<"HTTP API Request \"" << req->url.path << "\": Authentication failed, API Key missing in config" << endl;
+ g_log<<Logger::Error<<"HTTP API Request \"" << req->url.path << "\": Authentication failed, API Key missing in config" << endl;
throw HttpUnauthorizedException("X-API-Key");
}
bool auth_ok = req->compareHeader("x-api-key", api_key) || req->getvars["api-key"]==api_key;
if (!auth_ok) {
- L<<Logger::Error<<"HTTP Request \"" << req->url.path << "\": Authentication by API Key failed" << endl;
+ g_log<<Logger::Error<<"HTTP Request \"" << req->url.path << "\": Authentication by API Key failed" << endl;
throw HttpUnauthorizedException("X-API-Key");
}
if (!web_password.empty()) {
bool auth_ok = req->compareAuthorization(web_password);
if (!auth_ok) {
- L<<Logger::Debug<<"HTTP Request \"" << req->url.path << "\": Web Authentication failed" << endl;
+ g_log<<Logger::Debug<<"HTTP Request \"" << req->url.path << "\": Web Authentication failed" << endl;
throw HttpUnauthorizedException("Basic");
}
}
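
// The API authentication above boils down to: refuse outright if no key is
// configured, then accept either a matching X-API-Key header or a matching
// api-key query parameter. A simplified sketch with plain maps instead of the
// real request object (the real code goes through compareHeader(), which may
// do more than this straight comparison):
#include <map>
#include <string>

static bool apiRequestAuthorized(const std::map<std::string, std::string>& headers,
                                 const std::map<std::string, std::string>& getvars,
                                 const std::string& configuredKey)
{
  if (configuredKey.empty())
    return false;                              // no key configured: always refuse
  auto h = headers.find("x-api-key");
  if (h != headers.end() && h->second == configuredKey)
    return true;
  auto g = getvars.find("api-key");
  return g != getvars.end() && g->second == configuredKey;
}
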
try {
if (!req.complete) {
- L<<Logger::Debug<<"HTTP: Incomplete request" << endl;
+ g_log<<Logger::Debug<<"HTTP: Incomplete request" << endl;
throw HttpBadRequestException();
}
- L<<Logger::Debug<<"HTTP: Handling request \"" << req.url.path << "\"" << endl;
+ g_log<<Logger::Debug<<"HTTP: Handling request \"" << req.url.path << "\"" << endl;
YaHTTP::strstr_map_t::iterator header;
YaHTTP::THandlerFunction handler;
if (!YaHTTP::Router::Route(&req, handler)) {
- L<<Logger::Debug<<"HTTP: No route found for \"" << req.url.path << "\"" << endl;
+ g_log<<Logger::Debug<<"HTTP: No route found for \"" << req.url.path << "\"" << endl;
throw HttpNotFoundException();
}
try {
handler(&req, &resp);
- L<<Logger::Debug<<"HTTP: Result for \"" << req.url.path << "\": " << resp.status << ", body length: " << resp.body.size() << endl;
+ g_log<<Logger::Debug<<"HTTP: Result for \"" << req.url.path << "\": " << resp.status << ", body length: " << resp.body.size() << endl;
}
catch(HttpException&) {
throw;
}
catch(PDNSException &e) {
- L<<Logger::Error<<"HTTP ISE for \""<< req.url.path << "\": Exception: " << e.reason << endl;
+ g_log<<Logger::Error<<"HTTP ISE for \""<< req.url.path << "\": Exception: " << e.reason << endl;
throw HttpInternalServerErrorException();
}
catch(std::exception &e) {
- L<<Logger::Error<<"HTTP ISE for \""<< req.url.path << "\": STL Exception: " << e.what() << endl;
+ g_log<<Logger::Error<<"HTTP ISE for \""<< req.url.path << "\": STL Exception: " << e.what() << endl;
throw HttpInternalServerErrorException();
}
catch(...) {
- L<<Logger::Error<<"HTTP ISE for \""<< req.url.path << "\": Unknown Exception" << endl;
+ g_log<<Logger::Error<<"HTTP ISE for \""<< req.url.path << "\": Unknown Exception" << endl;
throw HttpInternalServerErrorException();
}
}
catch(HttpException &e) {
resp = e.response();
- L<<Logger::Debug<<"HTTP: Error result for \"" << req.url.path << "\": " << resp.status << endl;
+ g_log<<Logger::Debug<<"HTTP: Error result for \"" << req.url.path << "\": " << resp.status << endl;
string what = YaHTTP::Utility::status2text(resp.status);
if(req.accept_html) {
resp.headers["Content-Type"] = "text/html; charset=utf-8";
client->writenWithTimeout(reply.c_str(), reply.size(), timeout);
}
catch(PDNSException &e) {
- L<<Logger::Error<<"HTTP Exception: "<<e.reason<<endl;
+ g_log<<Logger::Error<<"HTTP Exception: "<<e.reason<<endl;
}
catch(std::exception &e) {
if(strstr(e.what(), "timeout")==0) // suppress routine timeout errors, log everything else
- L<<Logger::Error<<"HTTP STL Exception: "<<e.what()<<endl;
+ g_log<<Logger::Error<<"HTTP STL Exception: "<<e.what()<<endl;
}
catch(...) {
- L<<Logger::Error<<"HTTP: Unknown exception"<<endl;
+ g_log<<Logger::Error<<"HTTP: Unknown exception"<<endl;
}
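
// The request handling above applies one error policy: HttpException subclasses
// are rethrown so the outer catch can render their specific status, while any
// other exception is logged server-side and converted to a generic 500, keeping
// internal error details out of the response. A condensed sketch with the
// exception types visible in the hunks and an illustrative callable:
#include <exception>
#include <functional>
#include <string>

static void invokeWithHttpErrorPolicy(const std::string& path, const std::function<void()>& handler)
{
  try {
    handler();
  }
  catch (HttpException&) {
    throw;                                   // already carries its own HTTP status
  }
  catch (PDNSException& e) {
    g_log << Logger::Error << "HTTP ISE for \"" << path << "\": " << e.reason << endl;
    throw HttpInternalServerErrorException();
  }
  catch (std::exception& e) {
    g_log << Logger::Error << "HTTP ISE for \"" << path << "\": " << e.what() << endl;
    throw HttpInternalServerErrorException();
  }
  catch (...) {
    g_log << Logger::Error << "HTTP ISE for \"" << path << "\": unknown exception" << endl;
    throw HttpInternalServerErrorException();
  }
}
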
WebServer::WebServer(const string &listenaddress, int port) : d_server(nullptr)
{
try {
d_server = createServer();
- L<<Logger::Warning<<"Listening for HTTP requests on "<<d_server->d_local.toStringWithPort()<<endl;
+ g_log<<Logger::Warning<<"Listening for HTTP requests on "<<d_server->d_local.toStringWithPort()<<endl;
}
catch(NetworkError &e) {
- L<<Logger::Error<<"Listening on HTTP socket failed: "<<e.what()<<endl;
+ g_log<<Logger::Error<<"Listening on HTTP socket failed: "<<e.what()<<endl;
d_server = nullptr;
}
}
} else {
ComboAddress remote;
if (client->getRemote(remote))
- L<<Logger::Error<<"Webserver closing socket: remote ("<< remote.toString() <<") does not match 'webserver-allow-from'"<<endl;
+ g_log<<Logger::Error<<"Webserver closing socket: remote ("<< remote.toString() <<") does not match 'webserver-allow-from'"<<endl;
}
}
catch(PDNSException &e) {
- L<<Logger::Error<<"PDNSException while accepting a connection in main webserver thread: "<<e.reason<<endl;
+ g_log<<Logger::Error<<"PDNSException while accepting a connection in main webserver thread: "<<e.reason<<endl;
}
catch(std::exception &e) {
- L<<Logger::Error<<"STL Exception while accepting a connection in main webserver thread: "<<e.what()<<endl;
+ g_log<<Logger::Error<<"STL Exception while accepting a connection in main webserver thread: "<<e.what()<<endl;
}
catch(...) {
- L<<Logger::Error<<"Unknown exception while accepting a connection in main webserver thread"<<endl;
+ g_log<<Logger::Error<<"Unknown exception while accepting a connection in main webserver thread"<<endl;
}
}
}
catch(PDNSException &e) {
- L<<Logger::Error<<"PDNSException in main webserver thread: "<<e.reason<<endl;
+ g_log<<Logger::Error<<"PDNSException in main webserver thread: "<<e.reason<<endl;
}
catch(std::exception &e) {
- L<<Logger::Error<<"STL Exception in main webserver thread: "<<e.what()<<endl;
+ g_log<<Logger::Error<<"STL Exception in main webserver thread: "<<e.what()<<endl;
}
catch(...) {
- L<<Logger::Error<<"Unknown exception in main webserver thread"<<endl;
+ g_log<<Logger::Error<<"Unknown exception in main webserver thread"<<endl;
}
_exit(1);
}
}
}
catch(...) {
- L<<Logger::Error<<"Webserver statThread caught an exception, dying"<<endl;
+ g_log<<Logger::Error<<"Webserver statThread caught an exception, dying"<<endl;
_exit(1);
}
}
d_ws->go();
}
catch(...) {
- L<<Logger::Error<<"AuthWebServer thread caught an exception, dying"<<endl;
+ g_log<<Logger::Error<<"AuthWebServer thread caught an exception, dying"<<endl;
_exit(1);
}
}
}
} catch (NetworkError &e) {
// we're running in a shared process/thread, so can't just terminate/abort.
- L<<Logger::Warning<<"Network error in web thread: "<<e.what()<<endl;
+ g_log<<Logger::Warning<<"Network error in web thread: "<<e.what()<<endl;
return;
}
catch (...) {
- L<<Logger::Warning<<"Unknown error in web thread"<<endl;
+ g_log<<Logger::Warning<<"Unknown error in web thread"<<endl;
return;
}
// now send the reply
if (asendtcp(data, client.get()) == -1 || data.empty()) {
- L<<Logger::Error<<"Failed sending reply to HTTP client"<<endl;
+ g_log<<Logger::Error<<"Failed sending reply to HTTP client"<<endl;
}
}
catch(PDNSException &e) {
- L<<Logger::Error<<"HTTP Exception: "<<e.reason<<endl;
+ g_log<<Logger::Error<<"HTTP Exception: "<<e.reason<<endl;
}
catch(std::exception &e) {
if(strstr(e.what(), "timeout")==0) // suppress routine timeout errors, log everything else
- L<<Logger::Error<<"HTTP STL Exception: "<<e.what()<<endl;
+ g_log<<Logger::Error<<"HTTP STL Exception: "<<e.what()<<endl;
}
catch(...) {
- L<<Logger::Error<<"HTTP: Unknown exception"<<endl;
+ g_log<<Logger::Error<<"HTTP: Unknown exception"<<endl;
}
void AsyncWebServer::go() {