text
stringlengths
81
112k
def _getField(self, field, native=0, prompt=1):
    """Get a parameter field value.

    field  -- parameter field name (minimum-match expanded, e.g. 'p_value')
    native -- if true, return the value in its native type instead of a string
    prompt -- if true, retrieving p_value may prompt the user

    Raises SyntaxError for an unrecognized field name.
    """
    try:
        # expand field name using minimum match
        field = _getFieldDict[field]
    except KeyError as e:
        # re-raise the exception with a bit more info
        raise SyntaxError("Cannot get field " + field +
                          " for parameter " + self.name + "\n" + str(e))
    if field == "p_value":
        # return value of parameter
        # Note that IRAF returns the filename for list parameters
        # when p_value is used.  I consider this a bug, and it does
        # not appear to be used by any cl scripts or SPP programs
        # in either IRAF or STSDAS.  It is also in conflict with
        # the IRAF help documentation.  I am making p_value exactly
        # the same as just a simple CL parameter reference.
        return self.get(native=native, prompt=prompt)
    elif field == "p_name":
        return self.name
    elif field == "p_xtype":
        return self.type
    elif field == "p_type":
        return self._getPType()
    elif field == "p_mode":
        return self.mode
    elif field == "p_prompt":
        return self.prompt
    elif field == "p_scope":
        return self.scope
    elif field == "p_default" or field == "p_filename":
        # these all appear to be equivalent -- they just return the
        # current PFilename of the parameter (which is the same as the value
        # for non-list parameters, and is the filename for list parameters)
        return self._getPFilename(native, prompt)
    elif field == "p_maximum":
        if native:
            return self.max
        else:
            return self.toString(self.max)
    elif field == "p_minimum":
        # NOTE: the choice list lives in the minimum field, following the
        # CL convention of packing "|a|b|c|" into p_min.
        if self.choice is not None:
            if native:
                return self.choice
            else:
                schoice = list(map(self.toString, self.choice))
                return "|" + "|".join(schoice) + "|"
        else:
            if native:
                return self.min
            else:
                return self.toString(self.min)
    else:
        # XXX unimplemented fields:
        # p_length: maximum string length in bytes -- what to do with it?
        raise RuntimeError("Program bug in IrafPar._getField()\n" +
                           "Requested field " + field +
                           " for parameter " + self.name)
def _setField(self, value, field, check=1):
    """Set a parameter field value.

    value -- the new value for the field
    field -- parameter field name (minimum-match expanded)
    check -- if true, apply range/choice checking when setting p_value

    Raises SyntaxError for an unknown or unsettable field name.
    """
    try:
        # expand field name using minimum match
        field = _setFieldDict[field]
    except KeyError as e:
        raise SyntaxError("Cannot set field " + field +
                          " for parameter " + self.name + "\n" + str(e))
    if field == "p_prompt":
        self.prompt = irafutils.removeEscapes(irafutils.stripQuotes(value))
    elif field == "p_value":
        self.set(value, check=check)
    elif field == "p_filename":
        # this is only relevant for list parameters (*imcur, *gcur, etc.)
        self.set(value, check=check)
    elif field == "p_scope":
        self.scope = value
    elif field == "p_maximum":
        self.max = self._coerceOneValue(value)
    elif field == "p_minimum":
        # a "|a|b|c|" string in the minimum field defines a choice list
        if isinstance(value, str) and '|' in value:
            self._setChoice(irafutils.stripQuotes(value))
        else:
            self.min = self._coerceOneValue(value)
    elif field == "p_mode":
        # not doing any type or value checking here -- setting mode is
        # rare, so assume that it is being done correctly
        self.mode = irafutils.stripQuotes(value)
    else:
        # Bug fix: inserted "\n" so the message is not run together
        # ("..._setField()Requested field..."); matches _getField's format.
        raise RuntimeError("Program bug in IrafPar._setField()\n" +
                           "Requested field " + field +
                           " for parameter " + self.name)
def save(self, dolist=0):
    """Return .par format string for this parameter

    If dolist is set, returns fields as a list of strings.  Default
    is to return a single string appropriate for writing to a file.
    """
    quoted = not dolist
    ndim = len(self.shape)
    # field layout: name, type, mode, ndim, (dim, origin) per axis,
    # then min, max, prompt, then one field per array element
    fields = (7 + 2*ndim + len(self.value)) * [""]
    fields[0] = self.name
    fields[1] = self.type
    fields[2] = self.mode
    fields[3] = str(ndim)
    # Fix: renamed loop cursor from `next` (shadowed the builtin) and
    # dropped the unused `array_size` accumulator.
    idx = 4
    for d in self.shape:
        fields[idx] = str(d)
        idx += 1
        fields[idx] = '1'  # axis origin is always 1
        idx += 1
    nvstart = 7 + 2*ndim
    if self.choice is not None:
        # choice list is stored in the min field as "|a|b|c|"
        schoice = list(map(self.toString, self.choice))
        schoice.insert(0, '')
        schoice.append('')
        fields[nvstart-3] = repr('|'.join(schoice))
    elif self.min not in [None, INDEF]:
        fields[nvstart-3] = self.toString(self.min, quoted=quoted)
    # insert an escaped line break before min field
    if quoted:
        fields[nvstart-3] = '\\\n' + fields[nvstart-3]
    if self.max not in [None, INDEF]:
        fields[nvstart-2] = self.toString(self.max, quoted=quoted)
    if self.prompt:
        if quoted:
            sprompt = repr(self.prompt)
        else:
            sprompt = self.prompt
        # prompt can have embedded newlines (which are printed)
        sprompt = sprompt.replace(r'\012', '\n')
        sprompt = sprompt.replace(r'\n', '\n')
        fields[nvstart-1] = sprompt
    for i in range(len(self.value)):
        fields[nvstart+i] = self.toString(self.value[i], quoted=quoted)
    # insert an escaped line break before value fields
    if dolist:
        return fields
    else:
        fields[nvstart] = '\\\n' + fields[nvstart]
        return ','.join(fields)
def dpar(self, cl=1):
    """Return dpar-style executable assignment for parameter

    Default is to write CL version of code; if cl parameter is false,
    writes Python executable code instead.  Note that dpar doesn't
    even work for arrays in the CL, so we just use Python syntax here.
    """
    # render every element (quoted form), substituting "None" for blanks
    rendered = [self.toString(item, 1) for item in self.value]
    rendered = [text if text != "" else "None" for text in rendered]
    return "%s = [%s]" % (self.name, ', '.join(rendered))
def get(self, field=None, index=None, lpar=0, prompt=1, native=0, mode="h"):
    """Return value of this parameter as a string (or in native format
    if native is non-zero.)

    field  -- optional parameter field (p_prompt, p_minimum, etc.)
    index  -- optional array index (tuple or scalar, zero-based)
    lpar   -- accepted for interface compatibility; not referenced here
    native -- return native-typed value(s) rather than strings
    mode   -- effective mode used to decide whether to prompt
    """
    if field:
        return self._getField(field, native=native, prompt=prompt)
    # may prompt for value if prompt flag is set
    #XXX should change _optionalPrompt so we prompt for each element of
    #XXX the array separately?  I think array parameters are
    #XXX not useful as non-hidden params.
    if prompt:
        self._optionalPrompt(mode)
    if index is not None:
        sumindex = self._sumindex(index)
        try:
            if native:
                return self.value[sumindex]
            else:
                return self.toString(self.value[sumindex])
        except IndexError:
            # should never happen
            raise SyntaxError("Illegal index [" + repr(sumindex) +
                              "] for array parameter " + self.name)
    elif native:
        # return object itself for an array because it is
        # indexable, can have values assigned, etc.
        return self
    else:
        # return blank-separated string of values for array
        return str(self)
def set(self, value, field=None, index=None, check=1):
    """Set value of this parameter from a string or other value.

    Field is optional parameter field (p_prompt, p_minimum, etc.)
    Index is optional array index (zero-based).  Set check=0 to
    assign the value without checking to see if it is within the
    min-max range or in the choice list.
    """
    if index is not None:
        sumindex = self._sumindex(index)
        try:
            value = self._coerceOneValue(value)
            if check:
                self.value[sumindex] = self.checkOneValue(value)
            else:
                self.value[sumindex] = value
            # NOTE(review): per-element assignment returns without calling
            # setChanged(), unlike the whole-array path below -- confirm
            # this asymmetry is intended.
            return
        except IndexError:
            # should never happen
            raise SyntaxError("Illegal index [" + repr(sumindex) +
                              "] for array parameter " + self.name)
    if field:
        self._setField(value, field, check=check)
    else:
        if check:
            self.value = self.checkValue(value)
        else:
            self.value = self._coerceValue(value)
        self.setChanged()
def checkValue(self, value, strict=0):
    """Check and convert a parameter value.

    Raises an exception if the value is not permitted for this
    parameter.  Otherwise returns the value (converted to the right
    type.)
    """
    # coerce first, then validate each element individually
    coerced = self._coerceValue(value, strict)
    for element in coerced:
        self.checkOneValue(element, strict=strict)
    return coerced
Convert tuple index to 1-D index into value def _sumindex(self, index=None): """Convert tuple index to 1-D index into value""" try: ndim = len(index) except TypeError: # turn index into a 1-tuple index = (index,) ndim = 1 if len(self.shape) != ndim: raise ValueError("Index to %d-dimensional array %s has too %s dimensions" % (len(self.shape), self.name, ["many","few"][len(self.shape) > ndim])) sumindex = 0 for i in range(ndim-1,-1,-1): index1 = index[i] if index1 < 0 or index1 >= self.shape[i]: raise ValueError("Dimension %d index for array %s is out of bounds (value=%d)" % (i+1, self.name, index1)) sumindex = index1 + sumindex*self.shape[i] return sumindex
Coerce parameter to appropriate type Should accept None or null string. Must be an array. def _coerceValue(self,value,strict=0): """Coerce parameter to appropriate type Should accept None or null string. Must be an array. """ try: if isinstance(value,str): # allow single blank-separated string as input value = value.split() if len(value) != len(self.value): raise IndexError v = len(self.value)*[0] for i in range(len(v)): v[i] = self._coerceOneValue(value[i],strict) return v except (IndexError, TypeError): raise ValueError("Value must be a " + repr(len(self.value)) + "-element array for " + self.name)
def _setChoiceDict(self):
    """Create min-match dictionary for choice list"""
    # each choice maps to its own full name, enabling abbreviated lookup
    self.choiceDict = minmatch.MinMatchDict()
    for entry in self.choice:
        self.choiceDict.add(entry, entry)
def _checkAttribs(self, strict):
    """Check initial attributes to make sure they are legal

    Boolean parameters take no min/max/choice; warn about any that are
    present, then clear them.  A stray max with no prompt is assumed to
    be a prompt that lost its comma.
    """
    if self.min:
        warning("Minimum value not allowed for boolean-type parameter " +
                self.name, strict)
    self.min = None
    if self.max:
        if not self.prompt:
            warning("Maximum value not allowed for boolean-type parameter " +
                    self.name + " (probably missing comma)", strict)
            # try to recover by assuming max string is prompt
            self.prompt = self.max
        else:
            warning("Maximum value not allowed for boolean-type parameter " +
                    self.name, strict)
    self.max = None
    if self.choice:
        warning("Choice values not allowed for boolean-type parameter " +
                self.name, strict)
    self.choice = None
def _checkAttribs(self, strict):
    """Check initial attributes to make sure they are legal"""
    if self.choice:
        # real-typed parameters never take an enumerated choice list
        message = "Choice values not allowed for real-type parameter " + self.name
        warning(message, strict)
    self.choice = None
def patch(self, deviceId):
    """
    Updates the device with the given data. Supports a json payload like

    {
        fs: newFs
        samplesPerBatch: samplesPerBatch
        gyroEnabled: true
        gyroSensitivity: 500
        accelerometerEnabled: true
        accelerometerSensitivity: 2
    }

    A heartbeat is sent on completion of the request to ensure the analyser
    gets a rapid update.
    :return: the device and 200 if the update was ok, 500 if the patch
             produced errors, 400 if the device is not initialised.
    """
    try:
        device = self.recordingDevices.get(deviceId)
        # NOTE(review): if deviceId is unknown, `device` is presumably None
        # and the attribute access below will raise -- confirm upstream
        # guarantees the id exists.
        if device.status == RecordingDeviceStatus.INITIALISED:
            errors = self._handlePatch(device)
            if len(errors) == 0:
                return device, 200
            else:
                return device, 500
        else:
            # patching is only permitted once the device is initialised
            return device, 400
    finally:
        # heartbeat fires regardless of success or failure
        logger.info("Sending adhoc heartbeat on device state update")
        self.heartbeater.sendHeartbeat()
def legal_date(year, month, day):
    '''Check if this is a legal date in the Gregorian calendar

    Returns True for a valid date; raises ValueError otherwise.
    '''
    # Bug fix: months outside 1-12 were previously accepted silently
    # (treated as 31-day months); reject them explicitly.
    if not 1 <= month <= 12:
        raise ValueError("Month {} is out of range (1-12)".format(month))
    if month == 2:
        # February has 29 days in a leap year, 28 otherwise
        daysinmonth = 29 if isleap(year) else 28
    else:
        daysinmonth = 30 if month in HAVE_30_DAYS else 31
    if not (0 < day <= daysinmonth):
        raise ValueError("Month {} doesn't have a day {}".format(month, day))
    return True
def to_jd2(year, month, day):
    '''Gregorian to Julian Day Count for years between 1801-2099'''
    # http://quasar.as.utexas.edu/BillInfo/JulianDatesG.html
    legal_date(year, month, day)
    # January and February are treated as months 13-14 of the prior year
    if month <= 2:
        year -= 1
        month += 12
    century = floor(year / 100)
    # Gregorian leap-year correction
    correction = 2 - century + floor(century / 4)
    day_count_years = floor(365.25 * (year + 4716))
    day_count_months = floor(30.6001 * (month + 1))
    return correction + day + day_count_years + day_count_months - 1524.5
def from_jd(jd):
    '''Return Gregorian date in a (Y, M, D) tuple'''
    # normalize to the JD at the preceding midnight
    wjd = floor(jd - 0.5) + 0.5
    depoch = wjd - EPOCH
    # decompose elapsed days into 400-year, 100-year, and 4-year cycles
    quadricent = floor(depoch / INTERCALATION_CYCLE_DAYS)
    dqc = depoch % INTERCALATION_CYCLE_DAYS
    cent = floor(dqc / LEAP_SUPPRESSION_DAYS)
    dcent = dqc % LEAP_SUPPRESSION_DAYS
    quad = floor(dcent / LEAP_CYCLE_DAYS)
    dquad = dcent % LEAP_CYCLE_DAYS
    yindex = floor(dquad / YEAR_DAYS)
    year = (
        quadricent * INTERCALATION_CYCLE_YEARS +
        cent * LEAP_SUPPRESSION_YEARS +
        quad * LEAP_CYCLE_YEARS +
        yindex
    )
    # cent == 4 / yindex == 4 mark the final day of a cycle; in all other
    # cases the partial year counts as one more elapsed year
    if not (cent == 4 or yindex == 4):
        year += 1
    yearday = wjd - to_jd(year, 1, 1)
    leap = isleap(year)
    # leap_adj shifts the month formula past the end of February
    if yearday < 58 + leap:
        leap_adj = 0
    elif leap:
        leap_adj = 1
    else:
        leap_adj = 2
    month = floor((((yearday + leap_adj) * 12) + 373) / 367)
    day = int(wjd - to_jd(year, month, 1)) + 1
    return (year, month, day)
def storeFromHinge(self, name, hingePoints):
    """
    Stores a new item in the cache if it is allowed in.
    :param name: the name.
    :param hingePoints: the hinge points.
    :return: true if it is stored.
    """
    # refuse duplicates and invalid hinge data
    if name in self._cache:
        return False
    if not self._valid(hingePoints):
        return False
    self._cache[name] = {'name': name, 'type': 'hinge', 'hinge': hingePoints}
    self.writeCache()
    return True
def storeFromWav(self, uploadCacheEntry, start, end):
    """
    Stores a new item in the cache.
    :param uploadCacheEntry: the upload cache entry describing the wav file.
    :param start: start time.
    :param end: end time.
    :return: true if stored.
    """
    prefix = uploadCacheEntry['name'] + '_' + start + '_' + end
    # bail out if any wav entry for this file/slice already exists
    duplicate = any(item['type'] == 'wav' and item['name'].startswith(prefix)
                    for item in self._cache.values())
    if duplicate:
        return False
    # store one entry per analysis type
    for analysis in ['spectrum', 'peakSpectrum']:
        entry = {
            'name': prefix + '_' + analysis,
            'analysis': analysis,
            'start': start,
            'end': end,
            'type': 'wav',
            'filename': uploadCacheEntry['name']
        }
        self._cache[entry['name']] = entry
    self.writeCache()
    return True
def delete(self, name):
    """
    Deletes the named entry in the cache.
    :param name: the name.
    :return: true if it is deleted.
    """
    if name not in self._cache:
        return False
    del self._cache[name]
    self.writeCache()
    # TODO clean files
    return True
def analyse(self, name):
    """
    reads the specified file.
    :param name: the name.
    :return: the analysis as frequency/Pxx.
    """
    if name in self._cache:
        target = self._cache[name]
        if target['type'] == 'wav':
            # 'start'/'end' sentinel strings mean "no bound"
            signal = self._uploadController.loadSignal(
                target['filename'],
                start=target['start'] if target['start'] != 'start' else None,
                end=target['end'] if target['end'] != 'end' else None)
            if signal is not None:
                # TODO allow user defined window
                return getattr(signal, target['analysis'])(ref=1.0)
            else:
                # NOTE(review): this branch returns a (None, 404) tuple while
                # the other failure paths return bare None -- confirm callers
                # handle both shapes.
                return None, 404
            pass
        elif target['type'] == 'hinge':
            hingePoints = np.array(target['hinge']).astype(np.float64)
            # hinge points are stored as (dB, frequency) pairs
            x = hingePoints[:, 1]
            y = hingePoints[:, 0]
            # extend as straight line from 0 to 500
            if x[0] != 0:
                # near-zero (not zero) so the log-space interpolation works
                x = np.insert(x, 0, 0.0000001)
                y = np.insert(y, 0, y[0])
            if x[-1] != 500:
                x = np.insert(x, len(x), 500.0)
                y = np.insert(y, len(y), y[-1])
            # convert the y axis dB values into a linear value
            y = 10 ** (y / 10)
            # perform a logspace interpolation
            f = self.log_interp1d(x, y)
            # remap to 0-500
            xnew = np.linspace(x[0], x[-1], num=500, endpoint=False)
            # and convert back to dB
            return xnew, 10 * np.log10(f(xnew))
        else:
            logger.error('Unknown target type with name ' + name)
    return None
def log_interp1d(self, xx, yy, kind='linear'):
    """
    Performs a log space 1d interpolation.
    :param xx: the x values.
    :param yy: the y values.
    :param kind: the type of interpolation to apply (as per scipy interp1d)
    :return: the interpolation function.
    """
    log_x = np.log10(xx)
    log_y = np.log10(yy)
    linear_in_log = interp1d(log_x, log_y, kind=kind)

    def interpolate(zz):
        # interpolate in log-log space, then map back to linear space
        return np.power(10.0, linear_in_log(np.log10(zz)))

    return interpolate
def to_jd(year, month, day):
    "Retrieve the Julian date equivalent for this date"
    # 30-day months, 365-day years plus one leap day per 4 years
    days_from_months = (month - 1) * 30
    days_from_years = (year - 1) * 365 + floor(year / 4)
    return day + days_from_months + days_from_years + EPOCH - 1
def from_jd(jdc):
    "Create a new date from a Julian date."
    days_since_epoch = floor(jdc) + 0.5 - EPOCH
    # remove one day per completed 4-year leap cycle (1461 days)
    leap_days = floor((days_since_epoch + 366) / 1461)
    year = floor((days_since_epoch - leap_days) / 365) + 1
    yday = jdc - to_jd(year, 1, 1)
    # months are a uniform 30 days in this calendar
    month = floor(yday / 30) + 1
    day = yday - (month - 1) * 30 + 1
    return year, month, day
def troll(roll, dec, v2, v3):
    """ Computes the roll angle at the target position based on::

            the roll angle at the V1 axis(roll),
            the dec of the target(dec), and
            the V2/V3 position of the aperture (v2,v3) in arcseconds.

        Based on the algorithm provided by Colin Cox that is used in
        Generic Conversion at STScI.
    """
    # Convert every angle to radians; v2/v3 arrive in arcseconds
    roll_rad = DEGTORAD(roll)
    dec_rad = DEGTORAD(dec)
    v2_rad = DEGTORAD(v2 / 3600.)
    v3_rad = DEGTORAD(v3 / 3600.)
    # rho: angular distance of the aperture from the V1 axis
    sin_rho = sqrt((pow(sin(v2_rad), 2) + pow(sin(v3_rad), 2)) -
                   (pow(sin(v2_rad), 2) * pow(sin(v3_rad), 2)))
    rho = asin(sin_rho)
    beta = asin(sin(v3_rad) / sin_rho)
    if v2_rad < 0:
        beta = pi - beta
    gamma = asin(sin(v2_rad) / sin_rho)
    if v3_rad < 0:
        gamma = pi - gamma
    A = pi / 2. + roll_rad - beta
    B = atan2(sin(A) * cos(dec_rad),
              (sin(dec_rad) * sin_rho - cos(dec_rad) * cos(rho) * cos(A)))
    # final roll angle at the target, back in degrees
    return RADTODEG(pi - (gamma + B))
def print_archive(self, format=True):
    """ Prints out archived WCS keywords.

    format -- if False, dump each archived key/value pair plus PA_V3;
              otherwise print a formatted human-readable summary.
    """
    if len(list(self.orig_wcs.keys())) > 0:
        block = 'Original WCS keywords for ' + self.rootname + '\n'
        block += ' backed up on ' + repr(self.orig_wcs['WCSCDATE']) + '\n'
        if not format:
            for key in self.wcstrans.keys():
                block += key.upper() + " = " + repr(self.get_archivekw(key)) + '\n'
            # Bug fix: this previously used `block = ...`, discarding the
            # header and all key/value lines accumulated above.
            block += 'PA_V3: ' + repr(self.pa_obs) + '\n'
        else:
            block += 'CD_11  CD_12: ' + repr(self.get_archivekw('CD1_1')) + '  ' + repr(self.get_archivekw('CD1_2')) + '\n'
            block += 'CD_21  CD_22: ' + repr(self.get_archivekw('CD2_1')) + '  ' + repr(self.get_archivekw('CD2_2')) + '\n'
            block += 'CRVAL       : ' + repr(self.get_archivekw('CRVAL1')) + '  ' + repr(self.get_archivekw('CRVAL2')) + '\n'
            block += 'CRPIX       : ' + repr(self.get_archivekw('CRPIX1')) + '  ' + repr(self.get_archivekw('CRPIX2')) + '\n'
            block += 'NAXIS       : ' + repr(int(self.get_archivekw('NAXIS1'))) + '  ' + repr(int(self.get_archivekw('NAXIS2'))) + '\n'
            block += 'Plate Scale : ' + repr(self.get_archivekw('pixel scale')) + '\n'
            block += 'ORIENTAT    : ' + repr(self.get_archivekw('ORIENTAT')) + '\n'
        print(block)
def set_pscale(self):
    """ Compute the pixel scale based on active WCS values. """
    # a brand-new WCS has no CD matrix yet, so fall back to unit scale
    if self.new:
        self.pscale = 1.0
        return
    self.pscale = self.compute_pscale(self.cd11, self.cd21)
def compute_pscale(self, cd11, cd21):
    """ Compute the pixel scale based on active WCS values.

    Returns the scale in arcsec/pixel: the norm of the first CD-matrix
    column (degrees/pixel) converted to arcseconds.
    """
    column_norm = N.sqrt(N.power(cd11, 2) + N.power(cd21, 2))
    return column_norm * 3600.
def set_orient(self):
    """ Return the computed orientation based on CD matrix. """
    # position angle implied by the second column of the CD matrix
    angle = N.arctan2(self.cd12, self.cd22)
    self.orient = RADTODEG(angle)
def updateWCS(self, pixel_scale=None, orient=None, refpos=None, refval=None, size=None):
    """ Create a new CD Matrix from the absolute pixel scale
        and reference image orientation.

    pixel_scale -- new plate scale (arcsec/pixel); None keeps the old value
    orient      -- new orientation in degrees; None keeps the old value
    refpos      -- optional (crpix1, crpix2) reference pixel
    refval      -- optional (crval1, crval2) reference sky position
    size        -- optional (naxis1, naxis2) image size override
    """
    # Set up parameters necessary for updating WCS
    # Check to see if new value is provided,
    # If not, fall back on old value as the default
    _updateCD = no
    if orient is not None and orient != self.orient:
        pa = DEGTORAD(orient)
        self.orient = orient
        self._orient_lin = orient
        _updateCD = yes
    else:
        # In case only pixel_scale was specified
        pa = DEGTORAD(self.orient)
    if pixel_scale is not None and pixel_scale != self.pscale:
        _ratio = pixel_scale / self.pscale
        self.pscale = pixel_scale
        _updateCD = yes
    else:
        # In case, only orient was specified
        pixel_scale = self.pscale
        _ratio = None
    # If a new plate scale was given,
    # the default size should be revised accordingly
    # along with the default reference pixel position.
    # Added 31 Mar 03, WJH.
    if _ratio is not None:
        self.naxis1 /= _ratio
        self.naxis2 /= _ratio
        self.crpix1 = self.naxis1 / 2.
        self.crpix2 = self.naxis2 / 2.
    # However, if the user provides a given size,
    # set it to use that no matter what.
    if size is not None:
        self.naxis1 = size[0]
        self.naxis2 = size[1]
    # Insure that naxis1,2 always return as integer values.
    self.naxis1 = int(self.naxis1)
    self.naxis2 = int(self.naxis2)
    if refpos is not None:
        self.crpix1 = refpos[0]
        self.crpix2 = refpos[1]
    if self.crpix1 is None:
        self.crpix1 = self.naxis1 / 2.
        self.crpix2 = self.naxis2 / 2.
    if refval is not None:
        self.crval1 = refval[0]
        self.crval2 = refval[1]
    # Reset WCS info now...
    if _updateCD:
        # Only update this should the pscale or orientation change...
        pscale = pixel_scale / 3600.
        # orientation-only CD matrix (no skew): cd21 mirrors cd12,
        # cd22 mirrors -cd11
        self.cd11 = -pscale * N.cos(pa)
        self.cd12 = pscale * N.sin(pa)
        self.cd21 = self.cd12
        self.cd22 = -self.cd11
    # Now make sure that all derived values are really up-to-date based
    # on these changes
    self.update()
def scale_WCS(self, pixel_scale, retain=True):
    '''
    Scale the WCS to a new pixel_scale.  The 'retain' parameter
    [default value: True] controls whether or not to retain the
    original distortion solution in the CD matrix.
    '''
    _ratio = pixel_scale / self.pscale
    # Correct the size of the image and CRPIX values for scaled WCS
    self.naxis1 /= _ratio
    self.naxis2 /= _ratio
    self.crpix1 = self.naxis1 / 2.
    self.crpix2 = self.naxis2 / 2.
    if retain:
        # Correct the WCS while retaining original distortion information
        self.cd11 *= _ratio
        self.cd12 *= _ratio
        self.cd21 *= _ratio
        self.cd22 *= _ratio
    else:
        # Bug fix: `pa` was referenced here without being defined
        # (NameError on this branch); derive it from the current
        # orientation, matching updateWCS().
        pa = DEGTORAD(self.orient)
        pscale = pixel_scale / 3600.
        self.cd11 = -pscale * N.cos(pa)
        self.cd12 = pscale * N.sin(pa)
        self.cd21 = self.cd12
        self.cd22 = -self.cd11
    # Now make sure that all derived values are really up-to-date based
    # on these changes
    self.update()
def xy2rd(self, pos):
    """
    This method would apply the WCS keywords to a position to
    generate a new sky position.

    The algorithm comes directly from 'imgtools.xy2rd'

    translate (x,y) to (ra, dec)
    """
    if self.ctype1.find('TAN') < 0 or self.ctype2.find('TAN') < 0:
        print('XY2RD only supported for TAN projections.')
        raise TypeError
    if isinstance(pos, N.ndarray):
        # If we are working with an array of positions,
        # point to just X and Y values
        posx = pos[:, 0]
        posy = pos[:, 1]
    else:
        # Otherwise, we are working with a single X,Y tuple
        posx = pos[0]
        posy = pos[1]
    # standard coordinates (degrees) relative to the reference pixel
    xi = self.cd11 * (posx - self.crpix1) + self.cd12 * (posy - self.crpix2)
    eta = self.cd21 * (posx - self.crpix1) + self.cd22 * (posy - self.crpix2)
    xi = DEGTORAD(xi)
    eta = DEGTORAD(eta)
    ra0 = DEGTORAD(self.crval1)
    dec0 = DEGTORAD(self.crval2)
    # inverse gnomonic (TAN) projection about the reference point
    ra = N.arctan((xi / (N.cos(dec0) - eta * N.sin(dec0)))) + ra0
    dec = N.arctan(((eta * N.cos(dec0) + N.sin(dec0)) /
                    (N.sqrt((N.cos(dec0) - eta * N.sin(dec0)) ** 2 + xi ** 2))))
    ra = RADTODEG(ra)
    dec = RADTODEG(dec)
    # wrap RA into [0, 360); DIVMOD is presumably a modulo helper --
    # NOTE(review): confirm its semantics in this module
    ra = DIVMOD(ra, 360.)
    # Otherwise, just return the RA,Dec tuple.
    return ra, dec
def rd2xy(self, skypos, hour=no):
    """
    This method would use the WCS keywords to compute the XY position
    from a given RA/Dec tuple (in deg).

    NOTE: Investigate how to let this function accept arrays as well
    as single positions. WJH 27Mar03
    """
    if self.ctype1.find('TAN') < 0 or self.ctype2.find('TAN') < 0:
        print('RD2XY only supported for TAN projections.')
        raise TypeError
    # invert the CD matrix
    det = self.cd11 * self.cd22 - self.cd12 * self.cd21
    if det == 0.0:
        raise ArithmeticError("singular CD matrix!")
    cdinv11 = self.cd22 / det
    cdinv12 = -self.cd12 / det
    cdinv21 = -self.cd21 / det
    cdinv22 = self.cd11 / det
    # translate (ra, dec) to (x, y)
    ra0 = DEGTORAD(self.crval1)
    dec0 = DEGTORAD(self.crval2)
    if hour:
        # NOTE(review): this converts hours to degrees by mutating the
        # caller's skypos sequence in place -- confirm that side effect
        # is intended.
        skypos[0] = skypos[0] * 15.
    ra = DEGTORAD(skypos[0])
    dec = DEGTORAD(skypos[1])
    bottom = float(N.sin(dec) * N.sin(dec0) +
                   N.cos(dec) * N.cos(dec0) * N.cos(ra - ra0))
    if bottom == 0.0:
        raise ArithmeticError("Unreasonable RA/Dec range!")
    # forward gnomonic (TAN) projection to standard coordinates
    xi = RADTODEG((N.cos(dec) * N.sin(ra - ra0) / bottom))
    eta = RADTODEG((N.sin(dec) * N.cos(dec0) -
                    N.cos(dec) * N.sin(dec0) * N.cos(ra - ra0)) / bottom)
    x = cdinv11 * xi + cdinv12 * eta + self.crpix1
    y = cdinv21 * xi + cdinv22 * eta + self.crpix2
    return x, y
def rotateCD(self, orient):
    """ Rotates WCS CD matrix to new orientation given by 'orient' """
    # Determine where member CRVAL position falls in ref frame
    # Find out whether this needs to be rotated to align with
    # reference frame.
    _delta = self.get_orient() - orient
    if _delta == 0.:
        return
    # Build the rotation matrix, then rotate the CD matrix with it
    rotation = fileutil.buildRotMatrix(_delta)
    cd_matrix = N.array([[self.cd11, self.cd12],
                         [self.cd21, self.cd22]], dtype=N.float64)
    rotated = N.dot(cd_matrix, rotation)
    self.cd11 = rotated[0][0]
    self.cd12 = rotated[0][1]
    self.cd21 = rotated[1][0]
    self.cd22 = rotated[1][1]
    self.orient = orient
def write(self, fitsname=None, wcs=None, archive=True, overwrite=False, quiet=True):
    """ Write out the values of the WCS keywords to the
        specified image.

    If it is a GEIS image and 'fitsname' has been provided,
    it will automatically make a multi-extension
    FITS copy of the GEIS and update that file. Otherwise, it
    throw an Exception if the user attempts to directly update
    a GEIS image header.

    If archive=True, also write out archived WCS keyword values to file.
    If overwrite=True, replace archived WCS values in file with new values.

    If a WCSObject is passed through the 'wcs' keyword, then the WCS keywords
    of this object are copied to the header of the image to be updated. A use
    case for this is updating the WCS of a WFPC2 data quality (_c1h.fits) file
    in order to be in sync with the science (_c0h.fits) file.
    """
    ## Start by making sure all derived values are in sync with CD matrix
    self.update()
    image = self.rootname
    _fitsname = fitsname
    if image.find('.fits') < 0 and _fitsname is not None:
        # A non-FITS image was provided, and openImage made a copy
        # Update attributes to point to new copy instead
        self.geisname = image
        image = self.rootname = _fitsname
    # Open image as writable FITS object
    fimg = fileutil.openImage(image, mode='update', fitsname=_fitsname)
    _root, _iextn = fileutil.parseFilename(image)
    _extn = fileutil.getExtn(fimg, _iextn)
    # Write out values to header...
    if wcs:
        # copy keywords from the supplied WCSObject instead of self
        _wcsobj = wcs
    else:
        _wcsobj = self
    for key in _wcsobj.wcstrans.keys():
        _dkey = _wcsobj.wcstrans[key]
        # 'pscale' is a derived quantity, not a FITS keyword
        if _dkey != 'pscale':
            _extn.header[key] = _wcsobj.__dict__[_dkey]
    # Close the file
    fimg.close()
    del fimg
    if archive:
        self.write_archive(fitsname=fitsname, overwrite=overwrite, quiet=quiet)
def restore(self):
    """ Reset the active WCS keywords to values stored in the
        backup keywords.
    """
    # If there are no backup keys, do nothing...
    if not self.backup:
        return
    for key in self.backup.keys():
        # the backup timestamp is bookkeeping, not a WCS attribute
        if key == 'WCSCDATE':
            continue
        self.__dict__[self.wcstrans[key]] = self.orig_wcs[self.backup[key]]
    self.update()
def archive(self, prepend=None, overwrite=no, quiet=yes):
    """ Create backup copies of the WCS keywords with the given prepended
        string.

    If backup keywords are already present, only update them if
    'overwrite' is set to 'yes', otherwise, do warn the user and do
    nothing.  Set the WCSDATE at this time as well.
    """
    # Verify that existing backup values are not overwritten accidentally.
    if len(list(self.backup.keys())) > 0 and overwrite == no:
        if not quiet:
            print('WARNING: Backup WCS keywords already exist! No backup made.')
            print(' The values can only be overridden if overwrite=yes.')
        return
    # Establish what prepend string to use...
    if prepend is None:
        if self.prepend is not None:
            _prefix = self.prepend
        else:
            _prefix = DEFAULT_PREFIX
    else:
        _prefix = prepend
    # Update backup and orig_wcs dictionaries
    # We have archive keywords and a defined prefix
    # Go through and append them to self.backup
    self.prepend = _prefix
    for key in self.wcstrans.keys():
        if key != 'pixel scale':
            _archive_key = self._buildNewKeyname(key, _prefix)
        else:
            # pseudo-keyword for the derived plate scale
            _archive_key = self.prepend.lower() + 'pscale'
        # if key != 'pixel scale':
        self.orig_wcs[_archive_key] = self.__dict__[self.wcstrans[key]]
        # backup maps original -> archived; revert maps archived -> original
        self.backup[key] = _archive_key
        self.revert[_archive_key] = key
    # Setup keyword to record when these keywords were backed up.
    self.orig_wcs['WCSCDATE'] = fileutil.getLTime()
    self.backup['WCSCDATE'] = 'WCSCDATE'
    self.revert['WCSCDATE'] = 'WCSCDATE'
def read_archive(self, header, prepend=None):
    """ Extract a copy of WCS keywords from an open file header,
        if they have already been created and remember the prefix
        used for those keywords. Otherwise, setup the current WCS
        keywords as the archive values.
    """
    # Start by looking for the any backup WCS keywords to
    # determine whether archived values are present and to set
    # the prefix used.
    _prefix = None
    _archive = False
    if header is not None:
        for kw in header.items():
            # an archived keyword is the original name with a
            # one-character prefix
            if kw[0][1:] in self.wcstrans.keys():
                _prefix = kw[0][0]
                _archive = True
                break
    if not _archive:
        # nothing archived yet -- archive the current values instead
        self.archive(prepend=prepend)
        return
    # We have archive keywords and a defined prefix
    # Go through and append them to self.backup
    if _prefix is not None:
        self.prepend = _prefix
    else:
        self.prepend = DEFAULT_PREFIX
    for key in self.wcstrans.keys():
        _archive_key = self._buildNewKeyname(key, _prefix)
        if key != 'pixel scale':
            if _archive_key in header:
                self.orig_wcs[_archive_key] = header[_archive_key]
            else:
                # fall back to the live keyword when no archived copy exists
                self.orig_wcs[_archive_key] = header[key]
            self.backup[key] = _archive_key
            self.revert[_archive_key] = key
    # Establish plate scale value
    _cd11str = self.prepend + 'CD1_1'
    _cd21str = self.prepend + 'CD2_1'
    pscale = self.compute_pscale(self.orig_wcs[_cd11str], self.orig_wcs[_cd21str])
    _archive_key = self.prepend.lower() + 'pscale'
    self.orig_wcs[_archive_key] = pscale
    self.backup['pixel scale'] = _archive_key
    self.revert[_archive_key] = 'pixel scale'
    # Setup keyword to record when these keywords were backed up.
    if 'WCSCDATE' in header:
        self.orig_wcs['WCSCDATE'] = header['WCSCDATE']
    else:
        self.orig_wcs['WCSCDATE'] = fileutil.getLTime()
    self.backup['WCSCDATE'] = 'WCSCDATE'
    self.revert['WCSCDATE'] = 'WCSCDATE'
def write_archive(self, fitsname=None, overwrite=no, quiet=yes):
    """
    Saves a copy of the WCS keywords from the image header
    as new keywords with the user-supplied 'prepend'
    character(s) prepended to the old keyword names.

    If the file is a GEIS image and 'fitsname' is not None, create
    a FITS copy and update that version; otherwise, raise an Exception
    and do not update anything.

    Parameters
    ----------
    fitsname : str or None
        Name for a FITS copy when the input is a GEIS image.
    overwrite : yes/no flag
        Whether existing archive keywords in the file may be replaced.
    quiet : yes/no flag
        Suppresses progress messages when set.
    """
    _fitsname = fitsname

    # Open image in update mode.
    # Copying of GEIS images is handled by 'openImage'.
    fimg = fileutil.openImage(self.rootname, mode='update', fitsname=_fitsname)
    if self.rootname.find('.fits') < 0 and _fitsname is not None:
        # A non-FITS image was provided, and openImage made a copy.
        # Update attributes to point to the new copy instead.
        self.geisname = self.rootname
        self.rootname = _fitsname

    # Extract the extension ID being updated.
    _root, _iextn = fileutil.parseFilename(self.rootname)
    _extn = fileutil.getExtn(fimg, _iextn)
    if not quiet:
        print('Updating archive WCS keywords for ', _fitsname)

    # Write out values to header...
    for key in self.orig_wcs.keys():
        _comment = None
        # revert maps an archive keyword back to the live keyword name.
        _dkey = self.revert[key]

        # Verify that archive keywords will not be overwritten,
        # unless overwrite=yes.
        _old_key = key in _extn.header
        if _old_key == True and overwrite == no:
            if not quiet:
                print('WCS keyword', key, ' already exists! Not overwriting.')
            continue

        # No archive keywords exist yet in file, or overwrite=yes...
        # Extract the value for the original keyword.
        if _dkey in _extn.header:
            # Extract any comment string for the keyword as well,
            # so the archived copy keeps the same comment text.
            _indx_key = _extn.header.index(_dkey)
            _full_key = _extn.header.cards[_indx_key]
            if not quiet:
                print('updating ', key, ' with value of: ', self.orig_wcs[key])
            _extn.header[key] = (self.orig_wcs[key], _full_key.comment)

    # NOTE: 'key' is deliberately rebound here after the loop to handle
    # the backup-timestamp keyword separately.
    key = 'WCSCDATE'
    if key not in _extn.header:
        # Record when these keywords were backed up.
        _extn.header[key] = (self.orig_wcs[key], "Time WCS keywords were copied.")

    # Close the now updated image.
    fimg.close()
    del fimg
def restoreWCS(self, prepend=None):
    """
    Resets the WCS values to the original values stored in the
    backup keywords recorded in self.backup.

    Parameters
    ----------
    prepend : str or None
        Prefix of the backup keywords; falls back to self.prepend.
    """
    # Open header for image.
    image = self.rootname

    if prepend:
        _prepend = prepend
    elif self.prepend:
        _prepend = self.prepend
    else:
        _prepend = None

    # Open image as writable FITS object.
    fimg = fileutil.openImage(image, mode='update')

    # Extract the extension ID being updated.
    _root, _iextn = fileutil.parseFilename(self.rootname)
    _extn = fileutil.getExtn(fimg, _iextn)

    if len(self.backup) > 0:
        # If it knows about the backup keywords already,
        # use this to restore the original values to the original keywords.
        for newkey in self.revert.keys():
            # NOTE(review): 'opscale' appears to be the archived
            # pixel-scale keyword for prefix 'O' — this hardcoded name
            # would not match other prefixes; confirm against callers.
            if newkey != 'opscale':
                _orig_key = self.revert[newkey]
                _extn.header[_orig_key] = _extn.header[newkey]
    elif _prepend:
        # No in-memory backup map: fall back to reconstructing the
        # archive keyword names from the prefix.
        for key in self.wcstrans.keys():
            # Get new keyword name based on old keyname and prepend string.
            if key != 'pixel scale':
                _okey = self._buildNewKeyname(key, _prepend)
                if _okey in _extn.header:
                    _extn.header[key] = _extn.header[_okey]
                else:
                    print('No original WCS values found. Exiting...')
                    break
    else:
        print('No original WCS values found. Exiting...')

    fimg.close()
    del fimg
def createReferenceWCS(self, refname, overwrite=yes):
    """
    Write out the values of the WCS keywords to the NEW specified
    image 'refname'.

    If 'refname' already exists and overwrite is set, it is replaced;
    otherwise a 'WCS' extension is appended to the existing file
    (unless one is already present).
    """
    hdu = self.createWcsHDU()
    # If refname already exists, delete it to make way for new file.
    if os.path.exists(refname):
        if overwrite == yes:
            # Remove previous version and re-create with new header.
            os.remove(refname)
            hdu.writeto(refname)
        else:
            # Append header to existing file, but only if no 'WCS'
            # extension is already present.
            wcs_append = True
            oldhdu = fits.open(refname, mode='append')
            for e in oldhdu:
                if 'extname' in e.header and e.header['extname'] == 'WCS':
                    wcs_append = False
            if wcs_append == True:
                oldhdu.append(hdu)
            oldhdu.close()
            del oldhdu
    else:
        # No previous file, so generate new one from scratch.
        hdu.writeto(refname)

    # Clean up.
    del hdu
def createWcsHDU(self):
    """
    Build and return an ImageHDU named 'WCS' whose header carries this
    object's WCS keywords, suitable for use as a reference WCS extension.
    """
    hdu = fits.ImageHDU()
    hdr = hdu.header
    hdr['EXTNAME'] = 'WCS'
    hdr['EXTVER'] = 1
    # Record the original image size information alongside the WCS itself.
    hdr['WCSAXES'] = (2, "number of World Coordinate System axes")
    hdr['NPIX1'] = (self.naxis1, "Length of array axis 1")
    hdr['NPIX2'] = (self.naxis2, "Length of array axis 2")
    hdr['PIXVALUE'] = (0.0, "values of pixels in array")

    # Copy every tracked WCS keyword except the axis lengths written above.
    skipped = ('naxis1', 'naxis2')
    for keyword in self.wcskeys:
        attr_name = self.wcstrans[keyword]
        if attr_name not in skipped:
            hdr[keyword] = self.__dict__[attr_name]
    return hdu
def main(args=None):
    """
    The main routine: prunes empty data directories, then serves the web
    application either via twisted (production) or flask's built-in
    server (development), depending on configuration.
    """
    logger = cfg.configureLogger()

    # Best-effort cleanup of empty directories under the data dir;
    # removedirs raises if a directory is non-empty, which we ignore.
    for root, dirs, files in os.walk(cfg.dataDir):
        for dir in dirs:
            newDir = os.path.join(root, dir)
            try:
                os.removedirs(newDir)
                logger.info("Deleted empty dir " + str(newDir))
            except:
                # Deliberate best-effort: non-empty dirs are left alone.
                pass

    if cfg.useTwisted:
        import logging
        logger = logging.getLogger('analyser.twisted')
        from twisted.internet import reactor
        from twisted.web.resource import Resource
        from twisted.web import static, server
        from twisted.web.wsgi import WSGIResource
        from twisted.application import service
        from twisted.internet import endpoints

        class ReactApp:
            """
            Handles the react app (excluding the static dir).
            """

            def __init__(self, path):
                # TODO allow this to load when in debug mode even if the files don't exist
                self.publicFiles = {f: static.File(os.path.join(path, f))
                                    for f in os.listdir(path)
                                    if os.path.exists(os.path.join(path, f))}
                self.indexHtml = ReactIndex(os.path.join(path, 'index.html'))

            def getFile(self, path):
                """
                overrides getChild so it always just serves index.html unless
                the file does actually exist (i.e. is an icon or something
                like that)
                """
                return self.publicFiles.get(path.decode('utf-8'), self.indexHtml)

        class ReactIndex(static.File):
            """
            a twisted File which overrides getChild so it always just serves
            index.html (NB: this is a bit of a hack, there is probably a more
            correct way to do this but...)
            """

            def getChild(self, path, request):
                return self

        class FlaskAppWrapper(Resource):
            """
            wraps the flask app as a WSGI resource while allowing the react
            index.html (and its associated static content) to be served as
            the default page.
            """

            def __init__(self):
                super().__init__()
                self.wsgi = WSGIResource(reactor, reactor.getThreadPool(), app)
                import sys
                if getattr(sys, 'frozen', False):
                    # pyinstaller lets you copy files to arbitrary locations
                    # under the _MEIPASS root dir
                    uiRoot = sys._MEIPASS
                else:
                    # release script moves the ui under the analyser package
                    # because setuptools doesn't seem to include files from
                    # outside the package
                    uiRoot = os.path.dirname(__file__)
                logger.info('Serving ui from ' + str(uiRoot))
                self.react = ReactApp(os.path.join(uiRoot, 'ui'))
                self.static = static.File(os.path.join(uiRoot, 'ui', 'static'))

            def getChild(self, path, request):
                """
                Overrides getChild to allow the request to be routed to the
                wsgi app (i.e. flask for the rest api calls), the static dir
                (i.e. for the packaged css/js etc), the various concrete
                files (i.e. the public dir from react-app) or to index.html
                (i.e. the react app) for everything else.
                :param path:
                :param request:
                :return:
                """
                if path == b'api':
                    request.prepath.pop()
                    request.postpath.insert(0, path)
                    return self.wsgi
                elif path == b'static':
                    return self.static
                else:
                    return self.react.getFile(path)

            def render(self, request):
                return self.wsgi.render(request)

        application = service.Application('analyser')
        site = server.Site(FlaskAppWrapper())
        endpoint = endpoints.TCP4ServerEndpoint(reactor, cfg.getPort(), interface='0.0.0.0')
        endpoint.listen(site)
        reactor.run()
    else:
        # get config from a flask standard place not our config yml
        app.run(debug=cfg.runInDebug(), host='0.0.0.0', port=cfg.getPort())
def leap(year, method=None):
    '''
    Determine if this is a leap year in the FR calendar using one of three
    methods: 4, 100, 128 (every 4th year; every 4th or 400th but not 100th;
    every 4th but not 128th), or the astronomical 'equinox' method.
    '''
    method = method or 'equinox'

    # Years 3, 7 and 11 were leap by proclamation; no other year
    # before 15 was a leap year.
    if year in (3, 7, 11):
        return True
    if year < 15:
        return False

    continuous = method in (4, 'continuous')
    madler = method in (128, 'madler')

    # The simple every-4th-year rule also covers the first madler years.
    if continuous or (year <= 16 and (madler or continuous)):
        return year % 4 == 3
    if method in (100, 'romme'):
        return (year % 4 == 0 and year % 100 != 0) or year % 400 == 0
    if madler:
        return year % 4 == 0 and year % 128 != 0
    if method == 'equinox':
        # Leap iff the next new year's equinox lands 366 days after this one.
        startjd = to_jd(year, 1, 1, method='equinox')
        return premier_da_la_annee(startjd + 367) - startjd == 366.0
    raise ValueError("Unknown leap year method. Try: continuous, romme, madler or equinox")
def premier_da_la_annee(jd):
    '''Return the Julian day number of the fall equinox that begins the
    French Revolutionary year containing the given Julian day.'''
    prev_eq = ephem.previous_fall_equinox(dublin.from_jd(jd))
    previous = trunc(dublin.to_jd(prev_eq) - 0.5) + 0.5
    # If the previous equinox was nearly a full year back, the given day
    # may itself fall on (or after) the next equinox — check it first.
    if previous + 364 < jd:
        next_eq = ephem.next_fall_equinox(dublin.from_jd(jd))
        candidate = trunc(dublin.to_jd(next_eq) - 0.5) + 0.5
        if candidate <= jd:
            return candidate
    return previous
def to_jd(year, month, day, method=None):
    '''Obtain the Julian day for a given French Revolutionary calendar date.

    :param year: FR year (1-based).
    :param month: FR month, 1-13; month 13 holds the 5-6 sansculottides.
    :param day: day of month, 1-30 (1-6 for month 13).
    :param method: leap-year method (4/'continuous', 100/'romme',
        128/'madler' or the default 'equinox').
    :raises ValueError: for an out-of-range month or day.
    '''
    method = method or 'equinox'
    if day < 1 or day > 30:
        raise ValueError("Invalid day for this calendar")
    # Fix: months below 1 were previously accepted silently and produced
    # nonsense dates; validate both ends of the range.
    if month < 1 or month > 13:
        raise ValueError("Invalid month for this calendar")
    if month == 13 and day > 5 + leap(year, method=method):
        raise ValueError("Invalid day for this month in this calendar")

    if method == 'equinox':
        return _to_jd_equinox(year, month, day)
    return _to_jd_schematic(year, month, day, method)
def _to_jd_schematic(year, month, day, method):
    '''Calculate JD using various leap-year calculation methods.

    Accumulates days in y0..y5: whole over-cycles (y0), intercalation
    cycles (y1), leap-suppression cycles (y2), 4-year leap cycles (y3),
    remaining plain years (y4) and a per-method base offset (y5).
    '''
    y0, y1, y2, y3, y4, y5 = 0, 0, 0, 0, 0, 0
    intercal_cycle_yrs, over_cycle_yrs, leap_suppression_yrs = None, None, None

    # Use the every-four-years method below year 16 (madler) or below 15 (romme).
    if ((method in (100, 'romme') and year < 15) or (method in (128, 'madler') and year < 17)):
        method = 4

    if method in (4, 'continuous'):
        # Leap years: 15, 19, 23, ...
        y5 = -365
    elif method in (100, 'romme'):
        # Count from year 13; base offset covers years 1-13.
        year = year - 13
        y5 = DAYS_IN_YEAR * 12 + 3
        leap_suppression_yrs = 100.
        leap_suppression_days = 36524  # leap_cycle_days * 25 - 1
        intercal_cycle_yrs = 400.
        intercal_cycle_days = 146097  # leap_suppression_days * 4 + 1
        over_cycle_yrs = 4000.
        over_cycle_days = 1460969  # intercal_cycle_days * 10 - 1
    elif method in (128, 'madler'):
        # Count from year 17; base offset covers years 1-17.
        year = year - 17
        y5 = DAYS_IN_YEAR * 16 + 4
        leap_suppression_days = 46751  # 32 * leap_cycle_days - 1
        leap_suppression_yrs = 128
    else:
        raise ValueError("Unknown leap year method. Try: continuous, romme, madler or equinox")

    # Count whole 4000-year over-cycles in days (romme only).
    if over_cycle_yrs:
        y0 = trunc(year / over_cycle_yrs) * over_cycle_days
        year = year % over_cycle_yrs

    # Count intercalary cycles in days (400 years long or None).
    if intercal_cycle_yrs:
        y1 = trunc(year / intercal_cycle_yrs) * intercal_cycle_days
        year = year % intercal_cycle_yrs

    # Count leap suppression cycles in days (100 or 128 years long).
    if leap_suppression_yrs:
        y2 = trunc(year / leap_suppression_yrs) * leap_suppression_days
        year = year % leap_suppression_yrs

    y3 = trunc(year / LEAP_CYCLE_YEARS) * LEAP_CYCLE_DAYS
    year = year % LEAP_CYCLE_YEARS

    # Adjust 'year' by one to account for lack of year 0.
    y4 = year * DAYS_IN_YEAR

    yj = y0 + y1 + y2 + y3 + y4 + y5
    mj = (month - 1) * 30
    return EPOCH + yj + mj + day - 1
def from_jd(jd, method=None):
    '''Calculate the date in the French Revolutionary calendar from a
    Julian day. The five or six "sansculottides" are considered a
    thirteenth month in the results of this function.'''
    chosen = method or 'equinox'
    if chosen == 'equinox':
        return _from_jd_equinox(jd)
    return _from_jd_schematic(jd, chosen)
def _from_jd_schematic(jd, method):
    '''Convert from JD using various leap-year calculation methods.

    Mirrors _to_jd_schematic: strips whole cycles off the day count J
    (over-cycle, intercalation cycle, leap-suppression cycle, 4-year
    cycle) while accumulating the corresponding years in y0..y4, with a
    per-method base year offset in y5.
    '''
    if jd < EPOCH:
        raise ValueError("Can't convert days before the French Revolution")

    # Days since epoch, normalized to the start of the (integral) day.
    J = trunc(jd) + 0.5 - EPOCH

    y0, y1, y2, y3, y4, y5 = 0, 0, 0, 0, 0, 0
    intercal_cycle_days = leap_suppression_days = over_cycle_days = None

    # Use the every-four-years method below year 17.
    if (J <= DAYS_IN_YEAR * 12 + 3 and method in (100, 'romme')) or (J <= DAYS_IN_YEAR * 17 + 4 and method in (128, 'madler')):
        method = 4

    # Set p and r in Hatcher algorithm.
    if method in (4, 'continuous'):
        # Leap years: 15, 19, 23, ...
        # Reorganize so that leap day is last day of cycle.
        J = J + 365
        y5 = - 1
    elif method in (100, 'romme'):
        # Year 15 is not a leap year.
        # Year 16 is leap, then multiples of 4, not multiples of 100,
        # yes multiples of 400.
        y5 = 12
        J = J - DAYS_IN_YEAR * 12 - 3
        leap_suppression_yrs = 100.
        leap_suppression_days = 36524  # LEAP_CYCLE_DAYS * 25 - 1
        intercal_cycle_yrs = 400.
        intercal_cycle_days = 146097  # leap_suppression_days * 4 + 1
        over_cycle_yrs = 4000.
        over_cycle_days = 1460969  # intercal_cycle_days * 10 - 1
    elif method in (128, 'madler'):
        # Year 15 is a leap year, then year 20 and multiples of 4,
        # not multiples of 128.
        y5 = 16
        J = J - DAYS_IN_YEAR * 16 - 4
        leap_suppression_yrs = 128
        leap_suppression_days = 46751  # 32 * leap_cycle_days - 1
    else:
        raise ValueError("Unknown leap year method. Try: continuous, romme, madler or equinox")

    # Strip whole 4000-year over-cycles (romme only).
    if over_cycle_days:
        y0 = trunc(J / over_cycle_days) * over_cycle_yrs
        J = J % over_cycle_days

    # Strip whole 400-year intercalation cycles.
    if intercal_cycle_days:
        y1 = trunc(J / intercal_cycle_days) * intercal_cycle_yrs
        J = J % intercal_cycle_days

    # Strip whole leap-suppression cycles (100 or 128 years).
    if leap_suppression_days:
        y2 = trunc(J / leap_suppression_days) * leap_suppression_yrs
        J = J % leap_suppression_days

    y3 = trunc(J / LEAP_CYCLE_DAYS) * LEAP_CYCLE_YEARS

    if J % LEAP_CYCLE_DAYS == LEAP_CYCLE_DAYS - 1:
        # Last day of the 4-year cycle is the leap day.
        J = 1460
    else:
        J = J % LEAP_CYCLE_DAYS

    # 0 <= J <= 1460
    # J needs to be 365 here on leap days ONLY.
    y4 = trunc(J / DAYS_IN_YEAR)

    if J == DAYS_IN_YEAR * 4:
        y4 = y4 - 1
        J = 365.0
    else:
        J = J % DAYS_IN_YEAR

    year = y0 + y1 + y2 + y3 + y4 + y5
    month = trunc(J / 30.)
    J = J - month * 30

    return year + 1, month + 1, trunc(J) + 1
def _from_jd_equinox(jd):
    '''Calculate the FR date treating the fall equinox as day 1 of the year.'''
    day_jd = trunc(jd) + 0.5
    new_year = premier_da_la_annee(day_jd)
    annee = gregorian.from_jd(new_year)[0] - YEAR_EPOCH
    elapsed = day_jd - new_year
    # Months are uniform 30-day blocks counted from the equinox.
    mois = trunc(elapsed / 30.) + 1
    jour = int(elapsed % 30) + 1
    return (annee, mois, jour)
def to_jd(year, month, day):
    '''Obtain the Julian day for an Indian Civil (Saka) date.'''
    gyear = year + 78
    leap = isleap(gyear)  # Is this a leap year?

    # Saka new year falls on Gregorian 22 March (21 March in leap years).
    start = gregorian.to_jd(gyear, 3, 22 - leap)

    # Caitra (month 1) has 31 days in leap years, 30 otherwise.
    Caitra = 31 if leap else 30

    if month == 1:
        return start + (day - 1)

    # Months 2-6 have 31 days, months 7-12 have 30 days.
    jd = start + Caitra + min(month - 2, 5) * 31
    if month >= 8:
        jd += (month - 7) * 30
    return jd + day - 1
def from_jd(jd):
    '''Calculate the Indian Civil (Saka) date for a given Julian day.'''
    saka_start = 80  # day-of-year offset between the Saka and Gregorian new years
    jd = trunc(jd) + 0.5

    greg = gregorian.from_jd(jd)  # Gregorian date for this Julian day
    gyear = greg[0]
    leap = isleap(gyear)

    # Tentative year in the Saka era.
    year = gyear - SAKA_EPOCH

    # Day number (0 based) within the Gregorian year.
    yday = jd - gregorian.to_jd(gyear, 1, 1)

    # Caitra (month 1) has 31 days in leap years, 30 otherwise.
    Caitra = 31 if leap else 30

    if yday < saka_start:
        # The day falls at the end of the preceding Saka year.
        year -= 1
        yday += Caitra + (31 * 5) + (30 * 3) + 10 + saka_start
    yday -= saka_start

    if yday < Caitra:
        return (year, 1, int(yday) + 1)

    mday = yday - Caitra
    if mday < 31 * 5:
        # Months 2-6 are 31 days long.
        return (year, trunc(mday / 31) + 2, int(mday % 31) + 1)

    # Months 7-12 are 30 days long.
    mday -= 31 * 5
    return (year, trunc(mday / 30) + 7, int(mday % 30) + 1)
def format_stack(skip=0, length=6, _sep=os.path.sep):
    """
    Returns a one-line string with the current callstack.

    :param skip: number of innermost frames to skip (in addition to this
        function's own frame).
    :param length: maximum number of frames to include.
    :param _sep: path separator, bound at def time as a micro-optimization.

    Each frame is rendered as "pkg/file.py:lineno:funcname" (last two
    path components only), innermost first, joined by ' < '.
    """
    return ' < '.join("%s:%s:%s" % (
        '/'.join(f.f_code.co_filename.split(_sep)[-2:]),
        f.f_lineno,
        f.f_code.co_name
    ) for f in islice(frame_iterator(sys._getframe(1 + skip)), length))
def log(func=None,
        stacktrace=10,
        stacktrace_align=60,
        attributes=(),
        module=True,
        call=True,
        call_args=True,
        call_args_repr=repr,
        result=True,
        exception=True,
        exception_repr=repr,
        result_repr=strip_non_ascii,
        use_logging='CRITICAL',
        print_to=None):
    """
    Decorates `func` to have logging.

    Args
        func (function): Function to decorate. If missing log returns a partial which you can use as a decorator.
        stacktrace (int): Number of frames to show.
        stacktrace_align (int): Column to align the framelist to.
        attributes (list): List of instance attributes to show, in case the function is a instance method.
        module (bool): Show the module.
        call (bool): If ``True``, then show calls. If ``False`` only show the call details on exceptions (if
            ``exception`` is enabled) (default: ``True``)
        call_args (bool): If ``True``, then show call arguments. (default: ``True``)
        call_args_repr (bool): Function to convert one argument to a string. (default: ``repr``)
        result (bool): If ``True``, then show result. (default: ``True``)
        exception (bool): If ``True``, then show exceptions. (default: ``True``)
        exception_repr (function): Function to convert an exception to a string. (default: ``repr``)
        result_repr (function): Function to convert the result object to a string. (default: ``strip_non_ascii`` -
            like ``str`` but nonascii characters are replaced with dots.)
        use_logging (string): Emit log messages with the given loglevel. (default: ``"CRITICAL"``)
        print_to (fileobject): File object to write to, in case you don't want to use logging module. (default:
            ``None`` - printing is disabled)

    Returns:
        A decorator or a wrapper.

    Example::

        >>> @log(print_to=sys.stdout)
        ... def a(weird=False):
        ...     if weird:
        ...         raise RuntimeError('BOOM!')
        >>> a()
        a() <<< ...
        a => None
        >>> try:
        ...     a(weird=True)
        ... except Exception:
        ...     pass # naughty code!
        a(weird=True) <<< ...
        a ~ raised RuntimeError('BOOM!',)

    You can conveniently use this to logs just errors, or just results, example::

        >>> import aspectlib
        >>> with aspectlib.weave(float, log(call=False, result=False, print_to=sys.stdout)):
        ...     try:
        ...         float('invalid')
        ...     except Exception as e:
        ...         pass # naughty code!
        float('invalid') <<< ...
        float ~ raised ValueError(...float...invalid...)

    This makes debugging naughty code easier.

    PS: Without the weaving it looks like this::

        >>> try:
        ...     log(call=False, result=False, print_to=sys.stdout)(float)('invalid')
        ... except Exception:
        ...     pass # naughty code!
        float('invalid') <<< ...
        float ~ raised ValueError(...float...invalid...)

    .. versionchanged:: 0.5.0

        Renamed `arguments` to `call_args`.
        Renamed `arguments_repr` to `call_args_repr`.
        Added `call` option.
    """
    # Resolve the log level name to a numeric level; the private mapping
    # attribute differs between Python 2 (_levelNames) and 3 (_nameToLevel).
    loglevel = use_logging and (
        logging._levelNames if hasattr(logging, '_levelNames') else logging._nameToLevel
    ).get(use_logging, logging.CRITICAL)
    # Sentinel distinguishing "attribute absent" from a real None value.
    _missing = object()

    def dump(buf):
        # Emit one log line; never let logging failures propagate into
        # the decorated function.
        try:
            if use_logging:
                logger._log(loglevel, buf, ())
            if print_to:
                buf += '\n'
                print_to.write(buf)
        except Exception as exc:
            logger.critical('Failed to log a message: %s', exc, exc_info=True)

    class __logged__(Aspect):
        # Aspect subclass wrapping the decorated callable; rebinds itself
        # on attribute access so instance methods log their instance too.
        __slots__ = 'cutpoint_function', 'final_function', 'binding', '__name__', '__weakref__'

        bind = False

        def __init__(self, cutpoint_function, binding=None):
            mimic(self, cutpoint_function)
            self.cutpoint_function = cutpoint_function
            self.final_function = super(__logged__, self).__call__(cutpoint_function)
            self.binding = binding

        def __get__(self, instance, owner):
            # Descriptor protocol: produce a wrapper bound to `instance`.
            return __logged__(self.cutpoint_function.__get__(instance, owner), instance)

        def __call__(self, *args, **kwargs):
            return self.final_function(*args, **kwargs)

        def advising_function(self, *args, **kwargs):
            # Generator-style advice: log the call, yield to run the
            # wrapped function, then log the result or the exception.
            name = self.cutpoint_function.__name__
            instance = self.binding
            if instance is not None:
                if isinstance(instance, InstanceType):
                    instance_type = instance.__class__
                else:
                    instance_type = type(instance)
                info = []
                for key in attributes:
                    # A trailing '()' on an attribute name means "call it".
                    if key.endswith('()'):
                        callarg = key = key.rstrip('()')
                    else:
                        callarg = False
                    val = getattr(instance, key, _missing)
                    if val is not _missing and key != name:
                        info.append(' %s=%s' % (
                            key, call_args_repr(val() if callarg else val)
                        ))
                sig = buf = '{%s%s%s}.%s' % (
                    instance_type.__module__ + '.' if module else '',
                    instance_type.__name__,
                    ''.join(info),
                    name
                )
            else:
                sig = buf = name
            if call_args:
                # call_args may be True (show all) or an int (show first N).
                buf += '(%s%s)' % (
                    ', '.join(repr(i) for i in (args if call_args is True else args[:call_args])),
                    ((', ' if args else '') + ', '.join('%s=%r' % i for i in kwargs.items()))
                    if kwargs and call_args is True
                    else '',
                )
            if stacktrace:
                buf = ("%%-%ds <<< %%s" % stacktrace_align) % (buf, format_stack(skip=1, length=stacktrace))
            if call:
                dump(buf)
            try:
                res = yield
            except Exception as exc:
                if exception:
                    if not call:
                        # Deferred call line: only shown when it failed.
                        dump(buf)
                    dump('%s ~ raised %s' % (sig, exception_repr(exc)))
                raise

            if result:
                dump('%s => %s' % (sig, result_repr(res)))

    if func:
        return __logged__(func)
    else:
        return __logged__
def wireHandlers(cfg):
    """
    Wires a data handler to every recording device based on configuration:
    a remote HTTP poster when configured, else a local CSV logger, else
    nothing (data is discarded). If the device is configured to run
    against a remote server, also ping that server with our current state.

    :param cfg: the config object.
    :return:
    """
    logger = logging.getLogger('recorder')
    httpPoster = cfg.handlers.get('remote')
    csvLogger = cfg.handlers.get('local')
    activeHandler = None
    if httpPoster is None:
        if csvLogger is None:
            logger.warning("App is running with discard handler only, ALL DATA WILL BE DISCARDED!!!")
        else:
            logger.info("App is running in standalone mode, logging data to local filesystem")
            activeHandler = csvLogger
    else:
        logger.info("App is running against remote server, logging data to " + httpPoster.target)
        activeHandler = httpPoster
        # Announce ourselves to the remote server straight away.
        heartbeater.serverURL = httpPoster.target
        heartbeater.ping()

    if activeHandler is not None:
        for device in cfg.recordingDevices.values():
            if activeHandler is httpPoster:
                # NOTE: set before the copy so each device's copy carries
                # its own device name.
                httpPoster.deviceName = device.name
            # Each device gets its own (shallow) copy of the handler,
            # optionally wrapped for asynchronous delivery.
            copied = copy.copy(activeHandler)
            device.dataHandler = copied if not cfg.useAsyncHandlers else AsyncHandler('recorder', copied)
def main(args=None):
    """
    The main routine: configures logging, wires the data handlers to the
    recording devices and starts the web application.
    """
    cfg.configureLogger()
    wireHandlers(cfg)
    # get config from a flask standard place not our config yml
    app.run(debug=cfg.runInDebug(), host='0.0.0.0', port=cfg.getPort())
def get(self, deviceId):
    """
    Lists all known active measurements for the given device.

    :param deviceId: the device to look up.
    :return: the device's measurements, or an empty list if none exist.
    """
    byName = self.measurements.get(deviceId)
    return [] if byName is None else list(byName.values())
def get(self, deviceId, measurementId):
    """
    Details the specific measurement.

    :param deviceId: the device to look up.
    :param measurementId: the measurement name.
    :return: the measurement, or None if the device or measurement is unknown.
    """
    byName = self.measurements.get(deviceId)
    if byName is None:
        return None
    return byName.get(measurementId)
def put(self, deviceId, measurementId):
    """
    Schedules a new measurement at the specified time.

    :param deviceId: the device to measure.
    :param measurementId: the name of the measurement.
    :return: 200 if it was scheduled, 400 if the device is busy, 500 if the device is bad.
    """
    record = self.measurements.get(deviceId)
    if record is not None:
        measurement = record.get(measurementId)
        if measurement is not None:
            # Fix: the original used string identity ('is') instead of
            # equality here, and referenced the comprehension variable
            # outside its scope (a NameError on Python 3).
            finished = any(s.name in ('COMPLETE', 'FAILED') for s in measurement.statuses)
            if finished:
                logger.info('Overwriting existing completed measurement ' + measurementId)
                measurement = None
        if measurement is None:
            logger.info('Initiating measurement ' + measurementId)
            measurement = ScheduledMeasurement(measurementId, self.recordingDevices.get(deviceId))
            body = request.get_json()
            duration_ = body['duration']

            def _cleanup():
                # Remove the finished measurement from the active store.
                logger.info('Removing ' + measurementId + ' from ' + deviceId)
                record.pop(measurementId)

            measurement.schedule(duration_, at=body.get('at'), delay=body.get('delay'), callback=_cleanup)
            # a quick hack to enable the measurement to be cleaned up by the ScheduledMeasurement
            record[measurementId] = measurement
            return measurement, 200
        else:
            # A measurement by this name is still in progress.
            return measurement, 400
    else:
        return 'unknown device ' + deviceId, 400
def delete(self, deviceId, measurementId):
    """
    Deletes a stored measurement.

    :param deviceId: the device to measure.
    :param measurementId: the name of the measurement.
    :return: 200 if it was deleted, 400 if no such measurement (or device).
    """
    byDevice = self.measurements.get(deviceId)
    if byDevice is None:
        return None, 400
    removed = byDevice.pop(measurementId, None)
    return removed, 200 if removed else 400
def get(self, deviceId, measurementId):
    """
    Signals a stop for the given measurement.

    :param deviceId: the device to measure.
    :param measurementId: the name of the measurement.
    :return: 200 if stop is signalled, 400 if it doesn't exist or is not running.
    """
    record = self.measurements.get(deviceId)
    if record is not None:
        measurement = record.get(measurementId)
        # Fix: an unknown measurementId previously raised AttributeError
        # (None.recording); return 400 instead, as documented.
        if measurement is None:
            return '', 400
        if measurement.recording:
            device = self.recordingDevices.get(deviceId)
            device.signalStop()
            return measurement, 200
        else:
            return measurement, 400
    return '', 400
def schedule(self, duration, at=None, delay=None, callback=None):
    """
    Schedules the measurement (to execute asynchronously).

    :param duration: how long to run for.
    :param at: the time to start at.
    :param delay: the time to wait til starting (use at or delay).
    :param callback: a callback invoked when the measurement completes.
    :return: nothing.
    """
    delay = self.calculateDelay(at, delay)
    self.callback = callback
    logger.info('Initiating measurement ' + self.name + ' for ' + str(duration) + 's in ' + str(delay) + 's')
    self.statuses.append({'name': ScheduledMeasurementStatus.SCHEDULED.name, 'time': datetime.utcnow()})
    # Timer runs execute(duration) on its own thread after the delay.
    threading.Timer(delay, self.execute, [duration]).start()
def execute(self, duration):
    """
    Executes the measurement, recording the event status transitions
    (RUNNING, then COMPLETE or FAILED).

    :param duration: the time to run for.
    :return: nothing.
    """
    self.statuses.append({'name': ScheduledMeasurementStatus.RUNNING.name, 'time': datetime.utcnow()})
    try:
        self.recording = True
        # Blocks until the device finishes (or fails) the recording.
        self.device.start(self.name, durationInSeconds=duration)
    finally:
        # Always clear the flag, even if the device raised.
        self.recording = False
    if self.device.status == RecordingDeviceStatus.FAILED:
        self.statuses.append({'name': ScheduledMeasurementStatus.FAILED.name,
                              'time': datetime.utcnow(),
                              'reason': self.device.failureCode})
    else:
        self.statuses.append({'name': ScheduledMeasurementStatus.COMPLETE.name, 'time': datetime.utcnow()})
    # this is a bit of a hack, need to remove this at some point by refactoring the way measurements are stored
    if self.callback is not None:
        self.callback()
def calculateDelay(self, at, delay):
    """
    Works out the number of seconds from now until the requested start,
    preferring "at" when both are supplied.

    :param at: the start time in %a %b %d %H:%M:%S %Y format.
    :param delay: the delay from now til start.
    :return: the delay in seconds (never negative; 0 when neither given).
    """
    if at is not None:
        target = datetime.strptime(at, DATETIME_FORMAT)
        remaining = (target - datetime.utcnow()).total_seconds()
        return max(remaining, 0)
    if delay is not None:
        return delay
    return 0
def launch_and_wait(mp_proc_list, pool_size):
    """
    Given a list of multiprocessing.Process objects which have not yet
    been started, this function launches them and blocks until the last
    finishes.  This makes sure that only <pool_size> processes are ever
    working at any one time (this number does not include the main
    process which called this function, since that will not tax the CPU).
    The idea here is roughly analogous to multiprocessing.Pool
    with the exceptions that:
        1 - The caller will get to use the multiprocessing.Process model
            of using shared memory (inheritance) to pass arg data to the
            child,
        2 - maxtasksperchild is always 1,
        3 - no function return value is kept/tranferred (not yet
            implemented)

    Raises RuntimeError if any child exits with a non-zero code.
    """
    # Sanity check
    if len(mp_proc_list) < 1:
        return

    # Create our own list with easy state watching.
    # WatchedProcess state: 0 = not started, 1 = started, 2 = finished.
    procs = []
    for p in mp_proc_list:
        procs.append(WatchedProcess(p))

    # Launch all of them, but only so pool_size are running at any time.
    keep_going = True
    while (keep_going):
        # Before we start any more, find out how many are running.  First go
        # through the list of those started and see if alive.  Update state.
        for p in procs:
            if p.state == 1:  # been started
                if not p.process.is_alive():
                    p.state = 2  # process has finished or been terminated
                    assert p.process.exitcode is not None, \
                        "Process is not alive but has no exitcode? " + \
                        str(p.process)

        # now figure num_running
        num_running = len([p for p in procs if p.state == 1])

        # Start some.  Only as many as pool_size should ever be running.
        num_avail_cpus = pool_size - num_running
        num_to_start = len([p for p in procs if p.state == 0])
        if num_to_start < 1:
            # all have been started, can finally leave loop and go wait
            break
        if num_avail_cpus > 0 and num_to_start > 0:
            num_to_start_now = min(num_avail_cpus, num_to_start)
            started_now = 0
            for p in procs:
                if started_now < num_to_start_now and p.state == 0:
                    p.start_process()
                    # debug "launch_and_wait: started: "+str(p.process)
                    started_now += 1
        # else: otherwise, all cpus are in use, just wait ...

        # sleep to tame loop activity, but also must sleep a bit after each
        # start call so that the call to is_alive() woorks correctly
        time.sleep(1)

    # Out of the launching loop, can now wait on all procs left.
    for p in procs:
        p.join_process()

    # Check all exit codes before returning
    for p in procs:
        if 0 != p.process.exitcode:
            raise RuntimeError("Problem during: " + str(p.process.name) + \
                ', exitcode: ' + str(p.process.exitcode) + '. Check log.')
def best_tile_layout(pool_size):
    """
    Determine and return the best layout of "tiles" for fastest overall
    parallel processing of a rectangular image broken up into N smaller
    equally-sized rectangular tiles, given as input the number of
    processes/chunks which can be run/worked at the same time (pool_size).

    The total number of tiles is kept as close as possible to pool_size
    without going over, and the layout is kept as squarish as possible
    except where that would sacrifice speed (e.g. 5 -> 1x5 beats 2x2).

    :param pool_size: number of concurrent workers available.
    :return: tuple of (num tiles in X dir, num tiles in Y dir).
    """
    # Trivially small pools need no tiling.
    if pool_size < 2:
        return (1, 1)

    # Hard-coded answers for small/awkward sizes where the sqrt heuristic
    # below would waste cores or give a poor aspect ratio.
    special_cases = {
        0: (1, 1), 1: (1, 1), 2: (1, 2), 3: (1, 3), 4: (2, 2),
        5: (1, 5), 6: (2, 3), 7: (2, 3), 8: (2, 4), 9: (3, 3),
        10: (2, 5), 11: (2, 5), 14: (2, 7), 18: (3, 6), 19: (3, 6),
        28: (4, 7), 29: (4, 7), 32: (4, 8), 33: (4, 8), 34: (4, 8),
        40: (4, 10), 41: (4, 10),
    }
    try:
        return special_cases[pool_size]
    except KeyError:
        pass

    # General case: squarish blocking via floor(sqrt).  Floor-rounding in
    # both factors guarantees n_x * n_y <= pool_size.
    n_x = int(math.sqrt(pool_size))
    n_y = int((1. * pool_size) / n_x)
    return (n_x, n_y)
def _accept(self):
    """
    Work loop: runs until self.running is set False.

    Each iteration pulls and processes one task via _completeTask();
    any failure in a single task is logged and the loop continues.
    :return:
    """
    logger.warning("Reactor " + self._name + " is starting")
    while self.running:
        try:
            self._completeTask()
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt and could prevent shutdown.
            logger.exception("Unexpected exception during request processing")
    logger.warning("Reactor " + self._name + " is terminating")
def offer(self, requestType, *args):
    """
    public interface to the reactor.
    :param requestType: the request type to enqueue; must be registered
        in self._funcsByRequest or the request is dropped with an error.
    :param args: arguments to pass along with the request.
    :return:
    """
    if self._funcsByRequest.get(requestType) is not None:
        # BUG FIX: was list(*args), which unpacked the tuple into list()
        # and raised TypeError whenever more than one arg was supplied.
        self._workQueue.put((requestType, list(args)))
    else:
        logger.error("Ignoring unknown request on reactor " + self._name + " " + requestType)
def clicked(self):
    """ Called when this button is clicked. Execute code from .cfgspc """
    try:
        from . import teal
    except Exception:
        teal = None
    # Pre-define names used in the error handler, so the handler cannot
    # itself raise NameError if the failure happens very early.
    tealGui = None
    code = ''
    try:
        # start drilling down into the tpo to get the code
        tealGui = self._mainGuiObj
        tealGui.showStatus('Clicked "'+self.getButtonLabel()+'"', keep=1)
        pscope = self.paramInfo.scope
        pname = self.paramInfo.name
        tpo = tealGui._taskParsObj
        tup = tpo.getExecuteStrings(pscope, pname)
        if not tup:
            if teal:
                teal.popUpErr(tealGui.top, "No action to perform",
                              "Action Button Error")
            return
        for exname in tup:
            if '_RULES_' in tpo and exname in tpo['_RULES_'].configspec:
                ruleSig = tpo['_RULES_'].configspec[exname]
                chkArgsDict = vtor_checks.sigStrToKwArgsDict(ruleSig)
                code = chkArgsDict.get('code') # a string or None
        # now go ahead and execute it
        teal.execEmbCode(pscope, pname, self.getButtonLabel(), tealGui, code)
        # done
        tealGui.debug('Finished: "'+self.getButtonLabel()+'"')
    except Exception as ex:
        # BUG FIX: ex.message does not exist on Python 3; use str(ex).
        msg = 'Error executing: "'+self.getButtonLabel()+'"\n'+str(ex)
        msgFull = msg+'\n'+''.join(traceback.format_exc())
        msgFull += "CODE:\n"+str(code)
        if tealGui:
            if teal:
                teal.popUpErr(tealGui.top, msg, "Action Button Error")
            tealGui.debug(msgFull)
        else:
            if teal:
                teal.popUpErr(None, msg, "Action Button Error")
            print(msgFull)
def ndarr2str(arr, encoding='ascii'):
    """
    Return the raw data of numpy array `arr` as a str.

    This is used to ensure that the caller always receives type 'str'
    (the raw buffer comes back as 'bytes' on Python 3 and must be decoded).

    :param arr: numpy array (assumed, not checked - the call fails otherwise)
    :param encoding: codec used to decode the raw bytes on Python 3.
    :return: str
    """
    # FIX: numpy deprecated (and later removed) tostring() in favor of
    # tobytes(); prefer tobytes() and keep tostring() only as a fallback
    # for very old numpy versions.
    if hasattr(arr, 'tobytes'):
        retval = arr.tobytes()
    else:
        retval = arr.tostring()
    # On Python 2, str == bytes so this is already the right type;
    # on Python 3 the buffer is bytes and must be decoded.
    if isinstance(retval, str):
        return retval
    return retval.decode(encoding)
def ndarr2bytes(arr, encoding='ascii'):
    """
    Return the raw data of numpy array `arr` as *bytes*.

    Companion to ndarr2str for the many cases where raw bytes (not
    unicode) are actually wanted.  Guards against any future numpy
    returning str here by encoding it back to bytes.

    :param arr: numpy array (assumed, not checked - the call fails otherwise)
    :param encoding: codec used if the buffer ever comes back as str.
    :return: bytes
    """
    # FIX: numpy deprecated (and later removed) tostring() in favor of
    # tobytes(); prefer tobytes() and keep tostring() only as a fallback
    # for very old numpy versions.
    if hasattr(arr, 'tobytes'):
        retval = arr.tobytes()
    else:
        retval = arr.tostring()
    if isinstance(retval, bytes):
        # Normal case (and on Python 2, where bytes == str).
        return retval
    # Defensive: a str came back - encode it to the bytes the caller wants.
    return retval.encode(encoding)
def tobytes(s, encoding='ascii'):
    """
    Convert string s to the 'bytes' type, in all Pythons, even
    back before Python 2.6.  What 'str' means varies by PY3K or not.

    :param s: a str/bytes/unicode value.
    :param encoding: codec used when an encode step is required.
    :return: bytes
    """
    # Simplified from the old PY3K/unicode branching: on Python 3,
    # bytes pass through and str is encoded; on Python 2, str *is*
    # bytes (passes through) and unicode is encoded - identical behavior
    # without referencing the py2-only `unicode` name.
    if isinstance(s, bytes):
        return s
    return s.encode(encoding)
def tostr(s, encoding='ascii'):
    """
    Convert string-like-thing s to the 'str' type, in all Pythons, even
    back before Python 2.6.  What 'str' means varies by PY3K or not.
    In Python 3+ this may require a decoding step.

    :param s: a str/bytes/bytearray/unicode value.
    :param encoding: codec used when a decode/encode step is required.
    :return: str
    """
    # Simplified from the old PY3K branching without referencing the
    # py2-only `unicode` name:
    if isinstance(s, str):
        # Python 3 str, or Python 2 str/bytes - already the native type.
        return s
    if isinstance(s, (bytes, bytearray)):
        # Python 3 bytes-like input - decode to str.
        return s.decode(encoding)
    # Remaining case is a Python 2 unicode object - encode down to str.
    return s.encode(encoding)
def retry(func=None, retries=5, backoff=None, exceptions=(IOError, OSError, EOFError), cleanup=None, sleep=time.sleep):
    """
    Decorator that retries the call ``retries`` times if ``func`` raises ``exceptions``. Can use a ``backoff`` function
    to sleep till next retry.

    Example::

        >>> should_fail = lambda foo=[1,2,3]: foo and foo.pop()
        >>> @retry
        ... def flaky_func():
        ...     if should_fail():
        ...         raise OSError('Tough luck!')
        ...     print("Success!")
        ...
        >>> flaky_func()
        Success!

    If it reaches the retry limit::

        >>> @retry
        ... def bad_func():
        ...     raise OSError('Tough luck!')
        ...
        >>> bad_func()
        Traceback (most recent call last):
        ...
        OSError: Tough luck!

    :param func: the function to wrap (filled in when used as a bare ``@retry``).
    :param retries: number of retries attempted after the initial call.
    :param backoff: None (no wait), a number of seconds, or a callable taking
        the attempt count and returning the sleep time.
    :param exceptions: exception types that trigger a retry; others propagate.
    :param cleanup: optional callable invoked with the original args before
        each retry attempt (not before the first attempt).
    :param sleep: sleep function, injectable for testing.
    """
    @Aspect(bind=True)
    def retry_aspect(cutpoint, *args, **kwargs):
        for count in range(retries + 1):
            try:
                if count and cleanup:
                    # This is a retry (count > 0): reset state before re-running.
                    cleanup(*args, **kwargs)
                # ``yield`` is where the advised call executes
                # (per the Aspect generator protocol - see aspectlib docs).
                yield
                break
            except exceptions as exc:
                if count == retries:
                    # Retry budget exhausted - let the last failure propagate.
                    raise
                # Work out how long to sleep before the next attempt.
                if not backoff:
                    timeout = 0
                elif isinstance(backoff, (int, float)):
                    timeout = backoff
                else:
                    timeout = backoff(count)
                logger.exception("%s(%s, %s) raised exception %s. %s retries left. Sleeping %s secs.",
                                 cutpoint.__name__, args, kwargs, exc, retries - count, timeout)
                sleep(timeout)

    # Support both ``@retry`` and ``@retry(retries=..., ...)`` usage.
    return retry_aspect if func is None else retry_aspect(func)
def loadSignal(self, name, start=None, end=None):
    """
    Loads the named entry from the upload cache as a signal.
    :param name: the name.
    :param start: the time to start from in HH:mm:ss.SSS format
    :param end: the time to end at in HH:mm:ss.SSS format.
    :return: the signal if the named upload exists, otherwise None.
    """
    entry = self._getCacheEntry(name)
    if entry is None:
        return None
    # Import lazily so the wav support is only loaded when actually used.
    from analyser.common.signal import loadSignalFromWav
    return loadSignalFromWav(entry['path'], start=start, end=end)
:param name: the name of the cache entry. :return: the entry or none. def _getCacheEntry(self, name): """ :param name: the name of the cache entry. :return: the entry or none. """ return next((x for x in self._uploadCache if x['name'] == name), None)
def _convertTmp(self, tmpCacheEntry):
    """
    Moves a tmp file to the upload dir, resampling it if necessary, and then
    deleting the tmp entries.
    :param tmpCacheEntry: the cache entry (dict with at least 'path', 'name'
        and 'status' keys).
    :return:
    """
    from analyser.common.signal import loadSignalFromWav
    tmpCacheEntry['status'] = 'converting'
    logger.info("Loading " + tmpCacheEntry['path'])
    signal = loadSignalFromWav(tmpCacheEntry['path'])
    logger.info("Loaded " + tmpCacheEntry['path'])
    # The tmp wav is no longer needed once the samples are held in memory.
    if Path(tmpCacheEntry['path']).exists():
        logger.info('Deleting ' + tmpCacheEntry['path'])
        os.remove(tmpCacheEntry['path'])
    else:
        logger.warning('Tmp cache file does not exist: ' + tmpCacheEntry['path'])
    # Track progress: move the entry from the tmp cache into the conversion
    # cache while the resample/write below is in flight.
    self._tmpCache.remove(tmpCacheEntry)
    self._conversionCache.append(tmpCacheEntry)
    srcFs = signal.fs
    completeSamples = signal.samples
    outputFileName = os.path.join(self._uploadDir, tmpCacheEntry['name'])
    # Downsample to 1kHz when the source rate exceeds 1024Hz; otherwise keep
    # the source rate as-is (writeOutput skips resampling when rates match).
    if srcFs > 1024:
        self.writeOutput(outputFileName, completeSamples, srcFs, 1000)
    else:
        self.writeOutput(outputFileName, completeSamples, srcFs, srcFs)
    tmpCacheEntry['status'] = 'loaded'
    self._conversionCache.remove(tmpCacheEntry)
    self._uploadCache.append(self._extractMeta(outputFileName, 'loaded'))
def writeChunk(self, stream, filename, chunkIdx=None):
    """
    Streams an uploaded chunk to a file.
    :param stream: the binary stream that contains the file.
    :param filename: the name of the file.
    :param chunkIdx: optional chunk index (for writing to a tmp dir)
    :return: no of bytes written or -1 if there was an error.
    """
    import io
    outputFileName = filename if chunkIdx is None else filename + '.' + str(chunkIdx)
    outputDir = self._uploadDir if chunkIdx is None else self._tmpDir
    chunkFilePath = os.path.join(outputDir, outputFileName)
    if os.path.exists(chunkFilePath) and os.path.isfile(chunkFilePath):
        logger.error('Uploaded file already exists: ' + chunkFilePath)
        return -1
    # BUG FIX: the chunk file was opened but never closed; use a context
    # manager so the handle is released even if the stream read fails.
    count = 0
    with open(chunkFilePath, 'xb') as chunkFile:
        while True:
            chunk = stream.read(io.DEFAULT_BUFFER_SIZE)
            if len(chunk) == 0:
                break
            count += len(chunk)
            chunkFile.write(chunk)
    return count
def finalise(self, filename, totalChunks, status):
    """
    Completes the upload which means converting to a single 1kHz sample rate
    file output file.
    :param filename: the base name of the uploaded file.
    :param totalChunks: the expected number of chunk files.
    :param status: 'true' (case-insensitive) when the upload succeeded and
        the chunks should be concatenated; anything else skips straight to
        cleanup.
    :return:
    """
    def getChunkIdx(x):
        # Chunk files are named <filename>.<idx>; a non-numeric suffix
        # means the path is not one of our chunks.
        try:
            return int(x.suffix[1:])
        except ValueError:
            return -1

    def isChunkFile(x):
        return x.is_file() and -1 < getChunkIdx(x) <= totalChunks

    asSingleFile = os.path.join(self._tmpDir, filename)
    if status.lower() == 'true':
        chunks = [(getChunkIdx(file), str(file)) for file in Path(self._tmpDir).glob(filename + '.*') if isChunkFile(file)]
        # TODO if len(chunks) != totalChunks then error
        # Concatenate the chunks in index order into one file; 'x' mode
        # fails fast if the combined file already exists.
        with open(asSingleFile, 'xb') as wfd:
            for f in [x[1] for x in sorted(chunks, key=lambda tup: tup[0])]:
                with open(f, 'rb') as fd:
                    logger.info("cat " + f + " with " + asSingleFile)
                    # Copy in 10MB blocks to bound memory usage.
                    shutil.copyfileobj(fd, wfd, 1024 * 1024 * 10)
    self.cleanupChunks(filename, isChunkFile, status)
def writeOutput(self, filename, samples, srcFs, targetFs):
    """
    Resamples the samples to the targetFs and writes them to filename.
    :param filename: the output wav filename.
    :param samples: the samples to resample.
    :param srcFs: the source sample rate in Hz.
    :param targetFs: the target sample rate in Hz.
    :return: None
    """
    import librosa
    inputLength = samples.shape[-1]
    if srcFs != targetFs:
        if inputLength < targetFs:
            # Less than 1 second of data; too short to resample sensibly,
            # so keep the source rate instead.
            logger.info("Input signal is too short (" + str(inputLength) + " samples) for resampling to "
                        + str(targetFs) + "Hz")
            outputSamples = samples
            targetFs = srcFs
        else:
            logger.info("Resampling " + str(inputLength) + " samples from " + str(srcFs) + "Hz to "
                        + str(targetFs) + "Hz")
            outputSamples = librosa.resample(samples, srcFs, targetFs, res_type='kaiser_fast')
    else:
        outputSamples = samples
    logger.info("Writing output to " + filename)
    # Scale to full int32 range before writing (assumes samples are floats
    # in [-1.0, 1.0], the librosa convention - TODO confirm at the caller).
    maxv = np.iinfo(np.int32).max
    librosa.output.write_wav(filename, (outputSamples * maxv).astype(np.int32), targetFs)
    logger.info("Output written to " + filename)
def delete(self, name):
    """
    Deletes the named entry, removing its file from disk and the entry
    from the upload cache.
    :param name: the entry.
    :return: the deleted entry, or None if it was not found.
    """
    for idx, candidate in enumerate(self._uploadCache):
        if candidate['name'] == name:
            logger.info("Deleting " + name)
            os.remove(str(candidate['path']))
            del self._uploadCache[idx]
            return candidate
    logger.info("Unable to delete " + name + ", not found")
    return None
def eparOptionFactory(master, statusBar, param, defaultParam,
                      doScroll, fieldWidths, plugIn=None,
                      editedCallbackObj=None, helpCallbackObj=None,
                      mainGuiObj=None, defaultsVerb="Default", bg=None,
                      indent=False, flagging=False, flaggedColor=None):
    """Return EparOption item of appropriate type for the parameter param"""
    # Pick the widget class.  Allow passed-in overrides first.
    if plugIn is not None:
        eparOption = plugIn
    # If there is an enumerated list, regardless of datatype use EnumEparOption
    elif param.choice is not None:
        eparOption = EnumEparOption
    else:
        # Dispatch on the parameter's type; use String for types not in
        # the dictionary.
        eparOption = _eparOptionDict.get(param.type, StringEparOption)
    # Create it
    eo = eparOption(master, statusBar, param, defaultParam, doScroll,
                    fieldWidths, defaultsVerb, bg, indent=indent,
                    helpCallbackObj=helpCallbackObj, mainGuiObj=mainGuiObj)
    # Wire up post-construction configuration that all option types share.
    eo.setEditedCallbackObj(editedCallbackObj)
    eo.setIsFlagging(flagging, False)
    if flaggedColor:
        eo.setFlaggedColor(flaggedColor)
    return eo
def extraBindingsForSelectableText(self):
    """ Collect in 1 place the bindings needed for watchTextSelection() """
    # Bind every event that can change the text selection (see notes in
    # watchTextSelection), appending ("+") so existing bindings survive.
    events = ('<FocusIn>', '<ButtonRelease-1>', '<B1-Motion>',
              '<Shift_L>', '<Left>', '<Right>')
    for evt in events:
        self.entry.bind(evt, self.watchTextSelection, "+")
def focusOut(self, event=None):
    """Clear selection (if text is selected in this widget)"""
    # do nothing if this isn't a text-enabled widget
    if not self.isSelectable:
        return
    if self.entryCheck(event) is None:  # Entry value is OK
        # Save the last selection so it can be restored if we
        # come right back to this widget.  Then clear the selection
        # before moving on.
        entry = self.entry
        try:
            if not entry.selection_present():
                self.lastSelection = None
            else:
                self.lastSelection = (entry.index(SEL_FIRST),
                                      entry.index(SEL_LAST))
        except AttributeError:
            # presumably a widget without the text-selection API; skip
            pass
        if USING_X and sys.platform == 'darwin':
            pass  # do nothing here - we need it left selected for cut/paste
        else:
            entry.selection_clear()
    else:
        # entryCheck() rejected the value; stop further event processing
        return "break"
def watchTextSelection(self, event=None):
    """ Callback used to see if there is a new text selection. In certain
    cases we manually add the text to the clipboard (though on most
    platforms the correct behavior happens automatically). """
    # Note that this isn't perfect - it is a key click behind when
    # selections are made via shift-arrow.  If this becomes important, it
    # can likely be fixed with after().
    if self.entry.selection_present(): # entry must be text entry type
        i1 = self.entry.index(SEL_FIRST)
        i2 = self.entry.index(SEL_LAST)
        if i1 >= 0 and i2 >= 0 and i2 > i1:
            sel = self.entry.get()[i1:i2]
            # Add to clipboard on platforms where necessary.
            # NOTE(review): no clipboard call is actually made here - this
            # print looks like leftover debug output; confirm intent.
            print('selected: "'+sel+'"')
def focusIn(self, event=None):
    """Select all text (if applicable) on taking focus"""
    try:
        # doScroll returns false if the call was ignored because the last
        # call also came from this widget - that avoids unwanted scrolls
        # and text selection when focus merely bounces in and out of the
        # window.
        if self.doScroll(event):
            # Genuinely new focus: select all text in the widget.
            self.entry.selection_range(0, END)
        elif self.lastSelection:
            # Focus returned to the same widget: restore the selection
            # that was saved on the last FocusOut.
            self.entry.selection_range(*self.lastSelection)
    except AttributeError:
        # Widget has no selection support; nothing to do.
        pass
def widgetEdited(self, event=None, val=None, action='entry', skipDups=True):
    """ A general method for firing any applicable triggers when
        a value has been set.  This is meant to be easily callable from any
        part of this class (or its subclasses), so that it can be called
        as soon as need be (immed. on click?).  This is smart enough to
        be called multiple times, itself handling the removal of any/all
        duplicate successive calls (unless skipDups is False). If val is
        None, it will use the GUI entry's current value via choice.get().
        See teal.py for a description of action.

    :param event: unused; present for Tk callback compatibility.
    :param val: value to report; None means read the widget via choice.get().
    :param action: edit-action tag passed through to the callback
        (see teal.py).
    :param skipDups: when True, suppress a call whose value matches the
        immediately preceding call.
    """
    # be as lightweight as possible if obj doesn't care about this stuff
    if not self._editedCallbackObj and not self._flagNonDefaultVals:
        return
    # get the current value
    curVal = val # take this first, if it is given
    if curVal is None:
        curVal = self.choice.get()
    # do any flagging
    self.flagThisPar(curVal, False)
    # see if this is a duplicate successive call for the same value
    if skipDups and curVal==self._lastWidgetEditedVal:
        return
    # pull trigger
    if not self._editedCallbackObj:
        return
    self._editedCallbackObj.edited(self.paramInfo.scope,
                                   self.paramInfo.name,
                                   self.previousValue, curVal, action)
    # for our duplicate checker
    self._lastWidgetEditedVal = curVal
def popupChoices(self, event=None):
    """Popup right-click menu of special parameter operations

    Relies on browserEnabled, clearEnabled, unlearnEnabled, helpEnabled
    instance attributes to determine which items are available.
    """
    # don't bother if all items are disabled
    if NORMAL not in (self.browserEnabled, self.clearEnabled,
                      self.unlearnEnabled, self.helpEnabled):
        return

    self.menu = Menu(self.entry, tearoff = 0)
    if self.browserEnabled != DISABLED:
        # Handle file and directory in different functions (tkFileDialog)
        if capable.OF_TKFD_IN_EPAR:
            self.menu.add_command(label   = "File Browser",
                                  state   = self.browserEnabled,
                                  command = self.fileBrowser)
            self.menu.add_command(label   = "Directory Browser",
                                  state   = self.browserEnabled,
                                  command = self.dirBrowser)
        # Handle file and directory in the same function (filedlg)
        else:
            self.menu.add_command(label   = "File/Directory Browser",
                                  state   = self.browserEnabled,
                                  command = self.fileBrowser)
        self.menu.add_separator()
    self.menu.add_command(label   = "Clear",
                          state   = self.clearEnabled,
                          command = self.clearEntry)
    self.menu.add_command(label   = self.defaultsVerb,
                          state   = self.unlearnEnabled,
                          command = self.unlearnValue)
    self.menu.add_command(label   = 'Help',
                          state   = self.helpEnabled,
                          command = self.helpOnParam)

    # Get the current y-coordinate of the Entry
    ycoord = self.entry.winfo_rooty()

    # Get the current x-coordinate of the cursor
    # (XSHIFT nudges the popup left so it opens under the pointer)
    xcoord = self.entry.winfo_pointerx() - XSHIFT

    # Display the Menu as a popup as it is not associated with a Button
    self.menu.tk_popup(xcoord, ycoord)
def fileBrowser(self):
    """Invoke a tkinter file dialog"""
    # Two dialog implementations: the native tkinter one when available,
    # otherwise the bundled filedlg fallback.
    if capable.OF_TKFD_IN_EPAR:
        fname = askopenfilename(parent=self.entry, title="Select File")
    else:
        from . import filedlg
        self.fd = filedlg.PersistLoadFileDialog(self.entry,
                                                "Select File", "*")
        if self.fd.Show() != 1:
            self.fd.DialogCleanup()
            return
        fname = self.fd.GetFileName()
        self.fd.DialogCleanup()
    if not fname:
        return # canceled
    self.choice.set(fname)
    # don't select when we go back to widget to reduce risk of
    # accidentally typing over the filename
    self.lastSelection = None
def dirBrowser(self):
    """Invoke a tkinter directory dialog"""
    # This item is only added to the popup menu when the native tkinter
    # dialogs are available (see popupChoices); the else branch should be
    # unreachable.
    if capable.OF_TKFD_IN_EPAR:
        fname = askdirectory(parent=self.entry, title="Select Directory")
    else:
        raise NotImplementedError('Fix popupChoices() logic.')
    if not fname:
        return # canceled
    self.choice.set(fname)
    # don't select when we go back to widget to reduce risk of
    # accidentally typing over the filename
    self.lastSelection = None
def forceValue(self, newVal, noteEdited=False):
    """Force-set a parameter entry to the given value"""
    # Treat None as empty string so the entry widget never shows "None".
    value = "" if newVal is None else newVal
    self.choice.set(value)
    if noteEdited:
        # Fire edit triggers even for a repeated value (skipDups=False).
        self.widgetEdited(val=value, skipDups=False)
def unlearnValue(self):
    """Unlearn a parameter value by setting it back to its default"""
    # p_filename yields the default in its stored (string) form.
    defaultValue = self.defaultParamInfo.get(field="p_filename",
                                             native=0, prompt=0)
    self.choice.set(defaultValue)
def setActiveState(self, active):
    """ Use this to enable or disable (grey out) a parameter. """
    state = NORMAL if active else DISABLED
    # All three widgets making up this parameter row share the same state.
    for widget in (self.entry, self.inputLabel, self.promptLabel):
        widget.configure(state=state)
def flagThisPar(self, currentVal, force):
    """ If this par's value is different from the default value, it is here
    that we flag it somehow as such.  This basic version simply makes the
    surrounding text red (or returns it to normal).  May be overridden.
    Leave force at False if you want to allow this method to make smart
    time-saving decisions about when it can skip recoloring because it is
    already the right color.  Set force to true if you think we got out
    of sync and need to be fixed.

    :param currentVal: the value currently shown/typed for the parameter.
    :param force: recolor even when the cached flag state looks correct.
    """
    # Get out ASAP if we can
    if (not force) and (not self._flagNonDefaultVals):
        return

    # handle simple case before comparing values (quick return):
    # flagging has been turned off, so unconditionally reset to normal
    if force and not self._flagNonDefaultVals:
        self._flagged = False
        self.promptLabel.configure(fg="black")
        return

    # Get/format values to compare in their native types so e.g. "1" and
    # 1 compare equal when the parameter is numeric.
    currentNative = self.convertToNative(currentVal)
    defaultNative = self.convertToNative(self.defaultParamInfo.value)
    # par.value is same as par.get(native=1,prompt=0)

    # flag or unflag as needed; _flagged caches the current color so we
    # only call configure() when something actually changes (or force)
    if currentNative != defaultNative:
        if not self._flagged or force:
            self._flagged = True
            self.promptLabel.configure(fg=self._flaggedColor) # was red
    else: # same as def
        if self._flagged or force:
            self._flagged = False
            self.promptLabel.configure(fg="black")
def keypress(self, event):
    """Allow keys typed in widget to select items"""
    # Only shortcut keys should trigger a selection; any other key
    # (probably a stray event) is silently ignored.
    if event.keysym in self.shortcuts:
        self.choice.set(self.shortcuts[event.keysym])
def postcmd(self):
    """Make sure proper entry is activated when menu is posted"""
    current = self.choice.get()
    # The initial null value may not appear in the choice list; in that
    # case there is simply nothing to highlight.
    if current in self.paramInfo.choice:
        self.entry.menu.activate(self.paramInfo.choice.index(current))
def convertToNative(self, aVal):
    """ Convert to native bool; interpret certain strings. """
    # None and real bools pass straight through unchanged.
    if aVal is None or isinstance(aVal, bool):
        return aVal
    # Everything else is interpreted via its lower-cased string form.
    return str(aVal).lower() in ('1', 'on', 'yes', 'true')
def toggle(self, event=None):
    """Toggle value between Yes and No"""
    # Flip the value by selecting the opposite radio button, then fire
    # the usual edit triggers.
    target = self.rbno if self.choice.get() == "yes" else self.rbyes
    target.select()
    self.widgetEdited()
def entryCheck(self, event = None, repair = True):
    """ Ensure any INDEF entry is uppercase, before base class behavior """
    upper = self.choice.get().upper()
    if upper.strip() == 'INDEF':
        # Normalize case in the widget (surrounding whitespace is kept).
        self.choice.set(upper)
    # Defer the actual validation to the base class.
    return EparOption.entryCheck(self, event, repair = repair)
def xyinterp(x,y,xval):
    """
    :Purpose: Interpolates y based on the given xval.

    x and y are a pair of independent/dependent variable arrays that must
    be the same length. The x array must also be sorted.
    xval is a user-specified value. This routine looks up xval in the x
    array and uses that information to properly interpolate the value in
    the y array.

    Notes
    =====
    Use the searchsorted method on the X array to determine the bin in
    which xval falls; then use that information to compute the
    corresponding y value.

    See Also
    ========
    numpy

    Parameters
    ==========
    x: 1D numpy array
        independent variable array: MUST BE SORTED
    y: 1D numpy array
        dependent variable array
    xval: float
        the x value at which you want to know the value of y

    Returns
    =======
    y: float
        the value of y corresponding to xval

    Raises
    ======
    ValueError:
        If arrays are unequal length; or x array is unsorted;
        or if xval falls outside the bounds of x (extrapolation is
        unsupported)

    :version: 0.1 last modified 2006-07-06
    """
    #Enforce conditions on x, y, and xval:
    #x and y must correspond
    if len(x) != len(y):
        raise ValueError("Input arrays must be equal lengths")

    #Extrapolation not supported
    if xval < x[0]:
        raise ValueError("Value %f < min(x) %f: Extrapolation unsupported"%(xval,x[0]))
    if xval > x[-1]:
        raise ValueError("Value %f > max(x) %f: Extrapolation unsupported"%(xval,x[-1]))

    # BUG FIX: the old check compared `x.argsort().all()` with
    # `N.arange(len(x)).all()` - two scalars - and therefore never
    # detected an unsorted array.  Verify monotonicity directly.
    if N.any(N.diff(x) < 0):
        raise ValueError("Input array x must be sorted")

    # Now do the real work.
    hi = x.searchsorted(xval)
    if hi == 0:
        # xval == x[0] (smaller values raised above): no interpolation
        # needed.  The old code fell through to lo = -1 here and only
        # produced the right answer via accidental index wraparound.
        return y[0]
    lo = hi - 1

    # BUG FIX: numpy division by zero yields inf/nan rather than raising
    # ZeroDivisionError, so the old try/except silently produced nan for
    # duplicate x values; test the denominator explicitly instead.
    denom = x[hi] - x[lo]
    if denom == 0:
        seg = 0.0
    else:
        seg = (float(xval) - x[lo]) / denom
    yval = y[lo] + seg*(y[hi] - y[lo])
    return yval
updates the current record of the packet size per sample and the relationship between this and the fifo reads. def _setSampleSizeBytes(self): """ updates the current record of the packet size per sample and the relationship between this and the fifo reads. """ self.sampleSizeBytes = self.getPacketSize() if self.sampleSizeBytes > 0: self.maxBytesPerFifoRead = (32 // self.sampleSizeBytes)